/* asr.c revision 128964 */
1/*- 2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 3 * Copyright (c) 2000-2001 Adaptec Corporation 4 * All rights reserved. 5 * 6 * TERMS AND CONDITIONS OF USE 7 * 8 * Redistribution and use in source form, with or without modification, are 9 * permitted provided that redistributions of source code must retain the 10 * above copyright notice, this list of conditions and the following disclaimer. 11 * 12 * This software is provided `as is' by Adaptec and any express or implied 13 * warranties, including, but not limited to, the implied warranties of 14 * merchantability and fitness for a particular purpose, are disclaimed. In no 15 * event shall Adaptec be liable for any direct, indirect, incidental, special, 16 * exemplary or consequential damages (including, but not limited to, 17 * procurement of substitute goods or services; loss of use, data, or profits; 18 * or business interruptions) however caused and on any theory of liability, 19 * whether in contract, strict liability, or tort (including negligence or 20 * otherwise) arising in any way out of the use of this driver software, even 21 * if advised of the possibility of such damage. 22 * 23 * SCSI I2O host adapter driver 24 * 25 * V1.10 2004/05/05 scottl@freebsd.org 26 * - Massive cleanup of the driver to remove dead code and 27 * non-conformant style. 28 * - Removed most i386-specific code to make it more portable. 29 * - Converted to the bus_space API. 30 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com 31 * - The 2000S and 2005S do not initialize on some machines, 32 * increased timeout to 255ms from 50ms for the StatusGet 33 * command. 34 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com 35 * - I knew this one was too good to be true. The error return 36 * on ioctl commands needs to be compared to CAM_REQ_CMP, not 37 * to the bit masked status. 38 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com 39 * - The 2005S that was supported is affectionately called the 40 * Conjoined BAR Firmware. 
In order to support RAID-5 in a 41 * 16MB low-cost configuration, Firmware was forced to go 42 * to a Split BAR Firmware. This requires a separate IOP and 43 * Messaging base address. 44 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com 45 * - Handle support for 2005S Zero Channel RAID solution. 46 * - System locked up if the Adapter locked up. Do not try 47 * to send other commands if the resetIOP command fails. The 48 * fail outstanding command discovery loop was flawed as the 49 * removal of the command from the list prevented discovering 50 * all the commands. 51 * - Comment changes to clarify driver. 52 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. 53 * - We do not use the AC_FOUND_DEV event because of I2O. 54 * Removed asr_async. 55 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, 56 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. 57 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 58 * mode as this is confused with competitor adapters in run 59 * mode. 60 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove 61 * to prevent operating system panic. 62 * - moved default major number to 154 from 97. 63 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 64 * - The controller is not actually an ASR (Adaptec SCSI RAID) 65 * series that is visible, it's more of an internal code name. 66 * remove any visible references within reason for now. 67 * - bus_ptr->LUN was not correctly zeroed when initially 68 * allocated causing a possible panic of the operating system 69 * during boot. 70 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 71 * - Code always fails for ASR_getTid affecting performance. 72 * - initiated a set of changes that resulted from a formal 73 * code inspection by Mark_Salyzyn@adaptec.com, 74 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 75 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 
76 * Their findings were focussed on the LCT & TID handler, and 77 * all resulting changes were to improve code readability, 78 * consistency or have a positive effect on performance. 79 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 80 * - Passthrough returned an incorrect error. 81 * - Passthrough did not migrate the intrinsic scsi layer wakeup 82 * on command completion. 83 * - generate control device nodes using make_dev and delete_dev. 84 * - Performance affected by TID caching reallocing. 85 * - Made suggested changes by Justin_Gibbs@adaptec.com 86 * - use splcam instead of splbio. 87 * - use cam_imask instead of bio_imask. 88 * - use u_int8_t instead of u_char. 89 * - use u_int16_t instead of u_short. 90 * - use u_int32_t instead of u_long where appropriate. 91 * - use 64 bit context handler instead of 32 bit. 92 * - create_ccb should only allocate the worst case 93 * requirements for the driver since CAM may evolve 94 * making union ccb much larger than needed here. 95 * renamed create_ccb to asr_alloc_ccb. 96 * - go nutz justifying all debug prints as macros 97 * defined at the top and remove unsightly ifdefs. 98 * - INLINE STATIC viewed as confusing. Historically 99 * utilized to affect code performance and debug 100 * issues in OS, Compiler or OEM specific situations. 101 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 102 * - Ported from FreeBSD 2.2.X DPT I2O driver. 103 * changed struct scsi_xfer to union ccb/struct ccb_hdr 104 * changed variable name xs to ccb 105 * changed struct scsi_link to struct cam_path 106 * changed struct scsibus_data to struct cam_sim 107 * stopped using fordriver for holding on to the TID 108 * use proprietary packet creation instead of scsi_inquire 109 * CAM layer sends synchronize commands. 
110 */ 111 112#include <sys/cdefs.h> 113#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */ 114#include <sys/kernel.h> 115#include <sys/systm.h> 116#include <sys/malloc.h> 117#include <sys/conf.h> 118#include <sys/ioccom.h> 119#include <sys/proc.h> 120#include <sys/bus.h> 121#include <machine/resource.h> 122#include <machine/bus.h> 123#include <sys/rman.h> 124#include <sys/stat.h> 125 126#include <cam/cam.h> 127#include <cam/cam_ccb.h> 128#include <cam/cam_sim.h> 129#include <cam/cam_xpt_sim.h> 130#include <cam/cam_xpt_periph.h> 131 132#include <cam/scsi/scsi_all.h> 133#include <cam/scsi/scsi_message.h> 134 135#include <vm/vm.h> 136#include <vm/pmap.h> 137 138#if defined(__i386__) 139#include <i386/include/cputypes.h> 140#elif defined(__alpha__) 141#include <alpha/include/pmap.h> 142#endif 143#include <machine/vmparam.h> 144 145#include <dev/pci/pcivar.h> 146#include <dev/pci/pcireg.h> 147 148#define osdSwap4(x) ((u_long)ntohl((u_long)(x))) 149#define KVTOPHYS(x) vtophys(x) 150#include "dev/asr/dptalign.h" 151#include "dev/asr/i2oexec.h" 152#include "dev/asr/i2obscsi.h" 153#include "dev/asr/i2odpt.h" 154#include "dev/asr/i2oadptr.h" 155 156#include "dev/asr/sys_info.h" 157 158__FBSDID("$FreeBSD: head/sys/dev/asr/asr.c 128964 2004-05-05 14:19:43Z scottl $"); 159 160#define ASR_VERSION 1 161#define ASR_REVISION '1' 162#define ASR_SUBREVISION '0' 163#define ASR_MONTH 5 164#define ASR_DAY 5 165#define ASR_YEAR (2004 - 1980) 166 167/* 168 * Debug macros to reduce the unsightly ifdefs 169 */ 170#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 171static __inline void 172debug_asr_message(PI2O_MESSAGE_FRAME message) 173{ 174 u_int32_t * pointer = (u_int32_t *)message; 175 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message); 176 u_int32_t counter = 0; 177 178 while (length--) { 179 printf("%08lx%c", (u_long)*(pointer++), 180 (((++counter & 7) == 0) || (length == 0)) ? 
'\n' : ' '); 181 } 182} 183#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 184 185#ifdef DEBUG_ASR 186 /* Breaks on none STDC based compilers :-( */ 187#define debug_asr_printf(fmt,args...) printf(fmt, ##args) 188#define debug_asr_dump_message(message) debug_asr_message(message) 189#define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 190#else /* DEBUG_ASR */ 191#define debug_asr_printf(fmt,args...) 192#define debug_asr_dump_message(message) 193#define debug_asr_print_path(ccb) 194#endif /* DEBUG_ASR */ 195 196/* 197 * If DEBUG_ASR_CMD is defined: 198 * 0 - Display incoming SCSI commands 199 * 1 - add in a quick character before queueing. 200 * 2 - add in outgoing message frames. 201 */ 202#if (defined(DEBUG_ASR_CMD)) 203#define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args) 204static __inline void 205debug_asr_dump_ccb(union ccb *ccb) 206{ 207 u_int8_t *cp = (unsigned char *)&(ccb->csio.cdb_io); 208 int len = ccb->csio.cdb_len; 209 210 while (len) { 211 debug_asr_cmd_printf (" %02x", *(cp++)); 212 --len; 213 } 214} 215#if (DEBUG_ASR_CMD > 0) 216#define debug_asr_cmd1_printf debug_asr_cmd_printf 217#else 218#define debug_asr_cmd1_printf(fmt,args...) 219#endif 220#if (DEBUG_ASR_CMD > 1) 221#define debug_asr_cmd2_printf debug_asr_cmd_printf 222#define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 223#else 224#define debug_asr_cmd2_printf(fmt,args...) 225#define debug_asr_cmd2_dump_message(message) 226#endif 227#else /* DEBUG_ASR_CMD */ 228#define debug_asr_cmd_printf(fmt,args...) 229#define debug_asr_dump_ccb(ccb) 230#define debug_asr_cmd1_printf(fmt,args...) 231#define debug_asr_cmd2_printf(fmt,args...) 232#define debug_asr_cmd2_dump_message(message) 233#endif /* DEBUG_ASR_CMD */ 234 235#if (defined(DEBUG_ASR_USR_CMD)) 236#define debug_usr_cmd_printf(fmt,args...) 
printf(fmt,##args) 237#define debug_usr_cmd_dump_message(message) debug_usr_message(message) 238#else /* DEBUG_ASR_USR_CMD */ 239#define debug_usr_cmd_printf(fmt,args...) 240#define debug_usr_cmd_dump_message(message) 241#endif /* DEBUG_ASR_USR_CMD */ 242 243#include "dev/asr/dptsig.h" 244 245static dpt_sig_S ASR_sig = { 246 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL, 247 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0, 248 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5, 249 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION, 250 ASR_MONTH, ASR_DAY, ASR_YEAR, 251/* 01234567890123456789012345678901234567890123456789 < 50 chars */ 252 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver" 253 /* ^^^^^ asr_attach alters these to match OS */ 254}; 255 256/* Configuration Definitions */ 257 258#define SG_SIZE 58 /* Scatter Gather list Size */ 259#define MAX_TARGET_ID 126 /* Maximum Target ID supported */ 260#define MAX_LUN 255 /* Maximum LUN Supported */ 261#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */ 262#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */ 263#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */ 264#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */ 265#define MAX_MAP 4194304L /* Maximum mapping size of IOP */ 266 /* Also serves as the minimum map for */ 267 /* the 2005S zero channel RAID product */ 268 269/* I2O register set */ 270#define I2O_REG_STATUS 0x30 271#define I2O_REG_MASK 0x34 272#define I2O_REG_TOFIFO 0x40 273#define I2O_REG_FROMFIFO 0x44 274 275#define Mask_InterruptsDisabled 0x08 276 277/* 278 * A MIX of performance and space considerations for TID lookups 279 */ 280typedef u_int16_t tid_t; 281 282typedef struct { 283 u_int32_t size; /* up to MAX_LUN */ 284 tid_t TID[1]; 285} lun2tid_t; 286 287typedef struct { 288 u_int32_t size; /* up to MAX_TARGET */ 289 lun2tid_t * LUN[1]; 290} target2lun_t; 291 292/* 293 * To ensure that we only allocate and use the worst 
case ccb here, lets 294 * make our own local ccb union. If asr_alloc_ccb is utilized for another 295 * ccb type, ensure that you add the additional structures into our local 296 * ccb union. To ensure strict type checking, we will utilize the local 297 * ccb definition wherever possible. 298 */ 299union asr_ccb { 300 struct ccb_hdr ccb_h; /* For convenience */ 301 struct ccb_scsiio csio; 302 struct ccb_setasync csa; 303}; 304 305/************************************************************************** 306** ASR Host Adapter structure - One Structure For Each Host Adapter That ** 307** Is Configured Into The System. The Structure Supplies Configuration ** 308** Information, Status Info, Queue Info And An Active CCB List Pointer. ** 309***************************************************************************/ 310 311typedef struct Asr_softc { 312 u_int16_t ha_irq; 313 u_long ha_Base; /* base port for each board */ 314 bus_size_t ha_blinkLED; 315 bus_space_handle_t ha_i2o_bhandle; 316 bus_space_tag_t ha_i2o_btag; 317 bus_space_handle_t ha_frame_bhandle; 318 bus_space_tag_t ha_frame_btag; 319 I2O_IOP_ENTRY ha_SystemTable; 320 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */ 321 struct cam_path * ha_path[MAX_CHANNEL+1]; 322 struct cam_sim * ha_sim[MAX_CHANNEL+1]; 323 struct resource * ha_mem_res; 324 struct resource * ha_mes_res; 325 struct resource * ha_irq_res; 326 void * ha_intr; 327 PI2O_LCT ha_LCT; /* Complete list of devices */ 328#define le_type IdentityTag[0] 329#define I2O_BSA 0x20 330#define I2O_FCA 0x40 331#define I2O_SCSI 0x00 332#define I2O_PORT 0x80 333#define I2O_UNKNOWN 0x7F 334#define le_bus IdentityTag[1] 335#define le_target IdentityTag[2] 336#define le_lun IdentityTag[3] 337 target2lun_t * ha_targets[MAX_CHANNEL+1]; 338 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs; 339 u_long ha_Msgs_Phys; 340 341 u_int8_t ha_in_reset; 342#define HA_OPERATIONAL 0 343#define HA_IN_RESET 1 344#define HA_OFF_LINE 2 345#define HA_OFF_LINE_RECOVERY 3 346 /* Configuration 
information */ 347 /* The target id maximums we take */ 348 u_int8_t ha_MaxBus; /* Maximum bus */ 349 u_int8_t ha_MaxId; /* Maximum target ID */ 350 u_int8_t ha_MaxLun; /* Maximum target LUN */ 351 u_int8_t ha_SgSize; /* Max SG elements */ 352 u_int8_t ha_pciBusNum; 353 u_int8_t ha_pciDeviceNum; 354 u_int8_t ha_adapter_target[MAX_CHANNEL+1]; 355 u_int16_t ha_QueueSize; /* Max outstanding commands */ 356 u_int16_t ha_Msgs_Count; 357 358 /* Links into other parents and HBAs */ 359 struct Asr_softc * ha_next; /* HBA list */ 360 dev_t ha_devt; 361} Asr_softc_t; 362 363static Asr_softc_t * Asr_softc; 364 365/* 366 * Prototypes of the routines we have in this object. 367 */ 368 369/* I2O HDM interface */ 370static int asr_probe(device_t tag); 371static int asr_attach(device_t tag); 372 373static int asr_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, 374 struct thread *td); 375static int asr_open(dev_t dev, int32_t flags, int32_t ifmt, 376 struct thread *td); 377static int asr_close(dev_t dev, int flags, int ifmt, struct thread *td); 378static int asr_intr(Asr_softc_t *sc); 379static void asr_timeout(void *arg); 380static int ASR_init(Asr_softc_t *sc); 381static int ASR_acquireLct(Asr_softc_t *sc); 382static int ASR_acquireHrt(Asr_softc_t *sc); 383static void asr_action(struct cam_sim *sim, union ccb *ccb); 384static void asr_poll(struct cam_sim *sim); 385static int ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message); 386 387/* 388 * Here is the auto-probe structure used to nest our tests appropriately 389 * during the startup phase of the operating system. 
390 */ 391static device_method_t asr_methods[] = { 392 DEVMETHOD(device_probe, asr_probe), 393 DEVMETHOD(device_attach, asr_attach), 394 { 0, 0 } 395}; 396 397static driver_t asr_driver = { 398 "asr", 399 asr_methods, 400 sizeof(Asr_softc_t) 401}; 402 403static devclass_t asr_devclass; 404DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0); 405 406/* 407 * devsw for asr hba driver 408 * 409 * only ioctl is used. the sd driver provides all other access. 410 */ 411static struct cdevsw asr_cdevsw = { 412 .d_version = D_VERSION, 413 .d_flags = D_NEEDGIANT, 414 .d_open = asr_open, 415 .d_close = asr_close, 416 .d_ioctl = asr_ioctl, 417 .d_name = "asr", 418}; 419 420/* I2O support routines */ 421 422static __inline u_int32_t 423asr_get_FromFIFO(Asr_softc_t *sc) 424{ 425 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 426 I2O_REG_FROMFIFO)); 427} 428 429static __inline u_int32_t 430asr_get_ToFIFO(Asr_softc_t *sc) 431{ 432 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 433 I2O_REG_TOFIFO)); 434} 435 436static __inline u_int32_t 437asr_get_intr(Asr_softc_t *sc) 438{ 439 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 440 I2O_REG_MASK)); 441} 442 443static __inline u_int32_t 444asr_get_status(Asr_softc_t *sc) 445{ 446 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 447 I2O_REG_STATUS)); 448} 449 450static __inline void 451asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val) 452{ 453 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO, 454 val); 455} 456 457static __inline void 458asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val) 459{ 460 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO, 461 val); 462} 463 464static __inline void 465asr_set_intr(Asr_softc_t *sc, u_int32_t val) 466{ 467 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK, 468 val); 469} 470 471static __inline void 472asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len) 473{ 474 
bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle, 475 offset, (u_int32_t *)frame, len); 476} 477 478/* 479 * Fill message with default. 480 */ 481static PI2O_MESSAGE_FRAME 482ASR_fillMessage(void *Message, u_int16_t size) 483{ 484 PI2O_MESSAGE_FRAME Message_Ptr; 485 486 Message_Ptr = (I2O_MESSAGE_FRAME *)Message; 487 bzero(Message_Ptr, size); 488 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11); 489 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 490 (size + sizeof(U32) - 1) >> 2); 491 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 492 KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL")); 493 return (Message_Ptr); 494} /* ASR_fillMessage */ 495 496#define EMPTY_QUEUE (-1L) 497 498static __inline U32 499ASR_getMessage(Asr_softc_t *sc) 500{ 501 U32 MessageOffset; 502 503 MessageOffset = asr_get_ToFIFO(sc); 504 if (MessageOffset == EMPTY_QUEUE) 505 MessageOffset = asr_get_ToFIFO(sc); 506 507 return (MessageOffset); 508} /* ASR_getMessage */ 509 510/* Issue a polled command */ 511static U32 512ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message) 513{ 514 U32 Mask = -1L; 515 U32 MessageOffset; 516 u_int Delay = 1500; 517 518 /* 519 * ASR_initiateCp is only used for synchronous commands and will 520 * be made more resiliant to adapter delays since commands like 521 * resetIOP can cause the adapter to be deaf for a little time. 522 */ 523 while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE) 524 && (--Delay != 0)) { 525 DELAY (10000); 526 } 527 if (MessageOffset != EMPTY_QUEUE) { 528 asr_set_frame(sc, Message, MessageOffset, 529 I2O_MESSAGE_FRAME_getMessageSize(Message)); 530 /* 531 * Disable the Interrupts 532 */ 533 Mask = asr_get_intr(sc); 534 asr_set_intr(sc, Mask | Mask_InterruptsDisabled); 535 asr_set_ToFIFO(sc, MessageOffset); 536 } 537 return (Mask); 538} /* ASR_initiateCp */ 539 540/* 541 * Reset the adapter. 
542 */ 543static U32 544ASR_resetIOP(Asr_softc_t *sc) 545{ 546 struct resetMessage { 547 I2O_EXEC_IOP_RESET_MESSAGE M; 548 U32 R; 549 } Message; 550 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr; 551 U32 * volatile Reply_Ptr; 552 U32 Old; 553 554 /* 555 * Build up our copy of the Message. 556 */ 557 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message, 558 sizeof(I2O_EXEC_IOP_RESET_MESSAGE)); 559 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET); 560 /* 561 * Reset the Reply Status 562 */ 563 *(Reply_Ptr = (U32 *)((char *)Message_Ptr 564 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0; 565 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr, 566 KVTOPHYS((void *)Reply_Ptr)); 567 /* 568 * Send the Message out 569 */ 570 if ((Old = ASR_initiateCp(sc, 571 (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) { 572 /* 573 * Wait for a response (Poll), timeouts are dangerous if 574 * the card is truly responsive. We assume response in 2s. 575 */ 576 u_int8_t Delay = 200; 577 578 while ((*Reply_Ptr == 0) && (--Delay != 0)) { 579 DELAY (10000); 580 } 581 /* 582 * Re-enable the interrupts. 583 */ 584 asr_set_intr(sc, Old); 585 KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0")); 586 return(*Reply_Ptr); 587 } 588 KASSERT(Old != -1L, ("Old == -1")); 589 return (0); 590} /* ASR_resetIOP */ 591 592/* 593 * Get the curent state of the adapter 594 */ 595static PI2O_EXEC_STATUS_GET_REPLY 596ASR_getStatus(Asr_softc_t *sc, PI2O_EXEC_STATUS_GET_REPLY buffer) 597{ 598 I2O_EXEC_STATUS_GET_MESSAGE Message; 599 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr; 600 U32 Old; 601 602 /* 603 * Build up our copy of the Message. 
604 */ 605 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message, 606 sizeof(I2O_EXEC_STATUS_GET_MESSAGE)); 607 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr, 608 I2O_EXEC_STATUS_GET); 609 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr, 610 KVTOPHYS((void *)buffer)); 611 /* This one is a Byte Count */ 612 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr, 613 sizeof(I2O_EXEC_STATUS_GET_REPLY)); 614 /* 615 * Reset the Reply Status 616 */ 617 bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY)); 618 /* 619 * Send the Message out 620 */ 621 if ((Old = ASR_initiateCp(sc, 622 (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) { 623 /* 624 * Wait for a response (Poll), timeouts are dangerous if 625 * the card is truly responsive. We assume response in 50ms. 626 */ 627 u_int8_t Delay = 255; 628 629 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) { 630 if (--Delay == 0) { 631 buffer = NULL; 632 break; 633 } 634 DELAY (1000); 635 } 636 /* 637 * Re-enable the interrupts. 638 */ 639 asr_set_intr(sc, Old); 640 return (buffer); 641 } 642 return (NULL); 643} /* ASR_getStatus */ 644 645/* 646 * Check if the device is a SCSI I2O HBA, and add it to the list. 647 */ 648 649/* 650 * Probe for ASR controller. If we find it, we will use it. 651 * virtual adapters. 
652 */ 653static int 654asr_probe(device_t tag) 655{ 656 u_int32_t id; 657 658 id = (pci_get_device(tag) << 16) | pci_get_vendor(tag); 659 if ((id == 0xA5011044) || (id == 0xA5111044)) { 660 device_set_desc(tag, "Adaptec Caching SCSI RAID"); 661 return (-10); 662 } 663 return (ENXIO); 664} /* asr_probe */ 665 666static __inline union asr_ccb * 667asr_alloc_ccb(Asr_softc_t *sc) 668{ 669 union asr_ccb *new_ccb; 670 671 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb), 672 M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) { 673 new_ccb->ccb_h.pinfo.priority = 1; 674 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; 675 new_ccb->ccb_h.spriv_ptr0 = sc; 676 } 677 return (new_ccb); 678} /* asr_alloc_ccb */ 679 680static __inline void 681asr_free_ccb(union asr_ccb *free_ccb) 682{ 683 free(free_ccb, M_DEVBUF); 684} /* asr_free_ccb */ 685 686/* 687 * Print inquiry data `carefully' 688 */ 689static void 690ASR_prstring(u_int8_t *s, int len) 691{ 692 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) { 693 printf ("%c", *(s++)); 694 } 695} /* ASR_prstring */ 696 697/* 698 * Send a message synchronously and without Interrupt to a ccb. 699 */ 700static int 701ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message) 702{ 703 int s; 704 U32 Mask; 705 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 706 707 /* 708 * We do not need any (optional byteswapping) method access to 709 * the Initiator context field. 710 */ 711 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb); 712 713 /* Prevent interrupt service */ 714 s = splcam (); 715 Mask = asr_get_intr(sc); 716 asr_set_intr(sc, Mask | Mask_InterruptsDisabled); 717 718 if (ASR_queue(sc, Message) == EMPTY_QUEUE) { 719 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 720 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 721 } 722 723 /* 724 * Wait for this board to report a finished instruction. 
725 */ 726 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 727 (void)asr_intr (sc); 728 } 729 730 /* Re-enable Interrupts */ 731 asr_set_intr(sc, Mask); 732 splx(s); 733 734 return (ccb->ccb_h.status); 735} /* ASR_queue_s */ 736 737/* 738 * Send a message synchronously to an Asr_softc_t. 739 */ 740static int 741ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message) 742{ 743 union asr_ccb *ccb; 744 int status; 745 746 if ((ccb = asr_alloc_ccb (sc)) == NULL) { 747 return (CAM_REQUEUE_REQ); 748 } 749 750 status = ASR_queue_s (ccb, Message); 751 752 asr_free_ccb(ccb); 753 754 return (status); 755} /* ASR_queue_c */ 756 757/* 758 * Add the specified ccb to the active queue 759 */ 760static __inline void 761ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb) 762{ 763 int s; 764 765 s = splcam(); 766 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le); 767 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 768 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) { 769 /* 770 * RAID systems can take considerable time to 771 * complete some commands given the large cache 772 * flashes switching from write back to write thru. 773 */ 774 ccb->ccb_h.timeout = 6 * 60 * 1000; 775 } 776 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb, 777 (ccb->ccb_h.timeout * hz) / 1000); 778 } 779 splx(s); 780} /* ASR_ccbAdd */ 781 782/* 783 * Remove the specified ccb from the active queue. 784 */ 785static __inline void 786ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb) 787{ 788 int s; 789 790 s = splcam(); 791 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); 792 LIST_REMOVE(&(ccb->ccb_h), sim_links.le); 793 splx(s); 794} /* ASR_ccbRemove */ 795 796/* 797 * Fail all the active commands, so they get re-issued by the operating 798 * system. 
799 */ 800static void 801ASR_failActiveCommands(Asr_softc_t *sc) 802{ 803 struct ccb_hdr *ccb; 804 int s; 805 806 s = splcam(); 807 /* 808 * We do not need to inform the CAM layer that we had a bus 809 * reset since we manage it on our own, this also prevents the 810 * SCSI_DELAY settling that would be required on other systems. 811 * The `SCSI_DELAY' has already been handled by the card via the 812 * acquisition of the LCT table while we are at CAM priority level. 813 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) { 814 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL); 815 * } 816 */ 817 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) { 818 ASR_ccbRemove (sc, (union asr_ccb *)ccb); 819 820 ccb->status &= ~CAM_STATUS_MASK; 821 ccb->status |= CAM_REQUEUE_REQ; 822 /* Nothing Transfered */ 823 ((struct ccb_scsiio *)ccb)->resid 824 = ((struct ccb_scsiio *)ccb)->dxfer_len; 825 826 if (ccb->path) { 827 xpt_done ((union ccb *)ccb); 828 } else { 829 wakeup (ccb); 830 } 831 } 832 splx(s); 833} /* ASR_failActiveCommands */ 834 835/* 836 * The following command causes the HBA to reset the specific bus 837 */ 838static void 839ASR_resetBus(Asr_softc_t *sc, int bus) 840{ 841 I2O_HBA_BUS_RESET_MESSAGE Message; 842 I2O_HBA_BUS_RESET_MESSAGE *Message_Ptr; 843 PI2O_LCT_ENTRY Device; 844 845 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message, 846 sizeof(I2O_HBA_BUS_RESET_MESSAGE)); 847 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame, 848 I2O_HBA_BUS_RESET); 849 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 850 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 851 ++Device) { 852 if (((Device->le_type & I2O_PORT) != 0) 853 && (Device->le_bus == bus)) { 854 I2O_MESSAGE_FRAME_setTargetAddress( 855 &Message_Ptr->StdMessageFrame, 856 I2O_LCT_ENTRY_getLocalTID(Device)); 857 /* Asynchronous command, with no expectations */ 858 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 859 break; 860 } 861 } 862} /* ASR_resetBus */ 863 
864static __inline int 865ASR_getBlinkLedCode(Asr_softc_t *sc) 866{ 867 U8 blink; 868 869 if (sc == NULL) 870 return (0); 871 872 blink = bus_space_read_1(sc->ha_frame_btag, 873 sc->ha_frame_bhandle, sc->ha_blinkLED + 1); 874 if (blink != 0xBC) 875 return (0); 876 877 blink = bus_space_read_1(sc->ha_frame_btag, 878 sc->ha_frame_bhandle, sc->ha_blinkLED); 879 return (blink); 880} /* ASR_getBlinkCode */ 881 882/* 883 * Determine the address of an TID lookup. Must be done at high priority 884 * since the address can be changed by other threads of execution. 885 * 886 * Returns NULL pointer if not indexible (but will attempt to generate 887 * an index if `new_entry' flag is set to TRUE). 888 * 889 * All addressible entries are to be guaranteed zero if never initialized. 890 */ 891static tid_t * 892ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry) 893{ 894 target2lun_t *bus_ptr; 895 lun2tid_t *target_ptr; 896 unsigned new_size; 897 898 /* 899 * Validity checking of incoming parameters. More of a bound 900 * expansion limit than an issue with the code dealing with the 901 * values. 902 * 903 * sc must be valid before it gets here, so that check could be 904 * dropped if speed a critical issue. 905 */ 906 if ((sc == NULL) 907 || (bus > MAX_CHANNEL) 908 || (target > sc->ha_MaxId) 909 || (lun > sc->ha_MaxLun)) { 910 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n", 911 (u_long)sc, bus, target, lun); 912 return (NULL); 913 } 914 /* 915 * See if there is an associated bus list. 916 * 917 * for performance, allocate in size of BUS_CHUNK chunks. 918 * BUS_CHUNK must be a power of two. This is to reduce 919 * fragmentation effects on the allocations. 920 */ 921#define BUS_CHUNK 8 922 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1)); 923 if ((bus_ptr = sc->ha_targets[bus]) == NULL) { 924 /* 925 * Allocate a new structure? 926 * Since one element in structure, the +1 927 * needed for size has been abstracted. 
928 */ 929 if ((new_entry == FALSE) 930 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc ( 931 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size), 932 M_TEMP, M_WAITOK | M_ZERO)) 933 == NULL)) { 934 debug_asr_printf("failed to allocate bus list\n"); 935 return (NULL); 936 } 937 bus_ptr->size = new_size + 1; 938 } else if (bus_ptr->size <= new_size) { 939 target2lun_t * new_bus_ptr; 940 941 /* 942 * Reallocate a new structure? 943 * Since one element in structure, the +1 944 * needed for size has been abstracted. 945 */ 946 if ((new_entry == FALSE) 947 || ((new_bus_ptr = (target2lun_t *)malloc ( 948 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size), 949 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) { 950 debug_asr_printf("failed to reallocate bus list\n"); 951 return (NULL); 952 } 953 /* 954 * Copy the whole thing, safer, simpler coding 955 * and not really performance critical at this point. 956 */ 957 bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr) 958 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1))); 959 sc->ha_targets[bus] = new_bus_ptr; 960 free(bus_ptr, M_TEMP); 961 bus_ptr = new_bus_ptr; 962 bus_ptr->size = new_size + 1; 963 } 964 /* 965 * We now have the bus list, lets get to the target list. 966 * Since most systems have only *one* lun, we do not allocate 967 * in chunks as above, here we allow one, then in chunk sizes. 968 * TARGET_CHUNK must be a power of two. This is to reduce 969 * fragmentation effects on the allocations. 970 */ 971#define TARGET_CHUNK 8 972 if ((new_size = lun) != 0) { 973 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1)); 974 } 975 if ((target_ptr = bus_ptr->LUN[target]) == NULL) { 976 /* 977 * Allocate a new structure? 978 * Since one element in structure, the +1 979 * needed for size has been abstracted. 
980 */ 981 if ((new_entry == FALSE) 982 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc ( 983 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size), 984 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) { 985 debug_asr_printf("failed to allocate target list\n"); 986 return (NULL); 987 } 988 target_ptr->size = new_size + 1; 989 } else if (target_ptr->size <= new_size) { 990 lun2tid_t * new_target_ptr; 991 992 /* 993 * Reallocate a new structure? 994 * Since one element in structure, the +1 995 * needed for size has been abstracted. 996 */ 997 if ((new_entry == FALSE) 998 || ((new_target_ptr = (lun2tid_t *)malloc ( 999 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size), 1000 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) { 1001 debug_asr_printf("failed to reallocate target list\n"); 1002 return (NULL); 1003 } 1004 /* 1005 * Copy the whole thing, safer, simpler coding 1006 * and not really performance critical at this point. 1007 */ 1008 bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr) 1009 + (sizeof(target_ptr->TID) * (target_ptr->size - 1))); 1010 bus_ptr->LUN[target] = new_target_ptr; 1011 free(target_ptr, M_TEMP); 1012 target_ptr = new_target_ptr; 1013 target_ptr->size = new_size + 1; 1014 } 1015 /* 1016 * Now, acquire the TID address from the LUN indexed list. 1017 */ 1018 return (&(target_ptr->TID[lun])); 1019} /* ASR_getTidAddress */ 1020 1021/* 1022 * Get a pre-existing TID relationship. 1023 * 1024 * If the TID was never set, return (tid_t)-1. 1025 * 1026 * should use mutex rather than spl. 
1027 */ 1028static __inline tid_t 1029ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun) 1030{ 1031 tid_t *tid_ptr; 1032 int s; 1033 tid_t retval; 1034 1035 s = splcam(); 1036 if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL) 1037 /* (tid_t)0 or (tid_t)-1 indicate no TID */ 1038 || (*tid_ptr == (tid_t)0)) { 1039 splx(s); 1040 return ((tid_t)-1); 1041 } 1042 retval = *tid_ptr; 1043 splx(s); 1044 return (retval); 1045} /* ASR_getTid */ 1046 1047/* 1048 * Set a TID relationship. 1049 * 1050 * If the TID was not set, return (tid_t)-1. 1051 * 1052 * should use mutex rather than spl. 1053 */ 1054static __inline tid_t 1055ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t TID) 1056{ 1057 tid_t *tid_ptr; 1058 int s; 1059 1060 if (TID != (tid_t)-1) { 1061 if (TID == 0) { 1062 return ((tid_t)-1); 1063 } 1064 s = splcam(); 1065 if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE)) 1066 == NULL) { 1067 splx(s); 1068 return ((tid_t)-1); 1069 } 1070 *tid_ptr = TID; 1071 splx(s); 1072 } 1073 return (TID); 1074} /* ASR_setTid */ 1075 1076/*-------------------------------------------------------------------------*/ 1077/* Function ASR_rescan */ 1078/*-------------------------------------------------------------------------*/ 1079/* The Parameters Passed To This Function Are : */ 1080/* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 1081/* */ 1082/* This Function Will rescan the adapter and resynchronize any data */ 1083/* */ 1084/* Return : 0 For OK, Error Code Otherwise */ 1085/*-------------------------------------------------------------------------*/ 1086 1087static int 1088ASR_rescan(Asr_softc_t *sc) 1089{ 1090 int bus; 1091 int error; 1092 1093 /* 1094 * Re-acquire the LCT table and synchronize us to the adapter. 
1095 */ 1096 if ((error = ASR_acquireLct(sc)) == 0) { 1097 error = ASR_acquireHrt(sc); 1098 } 1099 1100 if (error != 0) { 1101 return error; 1102 } 1103 1104 bus = sc->ha_MaxBus; 1105 /* Reset all existing cached TID lookups */ 1106 do { 1107 int target, event = 0; 1108 1109 /* 1110 * Scan for all targets on this bus to see if they 1111 * got affected by the rescan. 1112 */ 1113 for (target = 0; target <= sc->ha_MaxId; ++target) { 1114 int lun; 1115 1116 /* Stay away from the controller ID */ 1117 if (target == sc->ha_adapter_target[bus]) { 1118 continue; 1119 } 1120 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 1121 PI2O_LCT_ENTRY Device; 1122 tid_t TID = (tid_t)-1; 1123 tid_t LastTID; 1124 1125 /* 1126 * See if the cached TID changed. Search for 1127 * the device in our new LCT. 1128 */ 1129 for (Device = sc->ha_LCT->LCTEntry; 1130 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT) 1131 + I2O_LCT_getTableSize(sc->ha_LCT)); 1132 ++Device) { 1133 if ((Device->le_type != I2O_UNKNOWN) 1134 && (Device->le_bus == bus) 1135 && (Device->le_target == target) 1136 && (Device->le_lun == lun) 1137 && (I2O_LCT_ENTRY_getUserTID(Device) 1138 == 0xFFF)) { 1139 TID = I2O_LCT_ENTRY_getLocalTID( 1140 Device); 1141 break; 1142 } 1143 } 1144 /* 1145 * Indicate to the OS that the label needs 1146 * to be recalculated, or that the specific 1147 * open device is no longer valid (Merde) 1148 * because the cached TID changed. 
1149 */ 1150 LastTID = ASR_getTid (sc, bus, target, lun); 1151 if (LastTID != TID) { 1152 struct cam_path * path; 1153 1154 if (xpt_create_path(&path, 1155 /*periph*/NULL, 1156 cam_sim_path(sc->ha_sim[bus]), 1157 target, lun) != CAM_REQ_CMP) { 1158 if (TID == (tid_t)-1) { 1159 event |= AC_LOST_DEVICE; 1160 } else { 1161 event |= AC_INQ_CHANGED 1162 | AC_GETDEV_CHANGED; 1163 } 1164 } else { 1165 if (TID == (tid_t)-1) { 1166 xpt_async( 1167 AC_LOST_DEVICE, 1168 path, NULL); 1169 } else if (LastTID == (tid_t)-1) { 1170 struct ccb_getdev ccb; 1171 1172 xpt_setup_ccb( 1173 &(ccb.ccb_h), 1174 path, /*priority*/5); 1175 xpt_async( 1176 AC_FOUND_DEVICE, 1177 path, 1178 &ccb); 1179 } else { 1180 xpt_async( 1181 AC_INQ_CHANGED, 1182 path, NULL); 1183 xpt_async( 1184 AC_GETDEV_CHANGED, 1185 path, NULL); 1186 } 1187 } 1188 } 1189 /* 1190 * We have the option of clearing the 1191 * cached TID for it to be rescanned, or to 1192 * set it now even if the device never got 1193 * accessed. We chose the later since we 1194 * currently do not use the condition that 1195 * the TID ever got cached. 1196 */ 1197 ASR_setTid (sc, bus, target, lun, TID); 1198 } 1199 } 1200 /* 1201 * The xpt layer can not handle multiple events at the 1202 * same call. 1203 */ 1204 if (event & AC_LOST_DEVICE) { 1205 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL); 1206 } 1207 if (event & AC_INQ_CHANGED) { 1208 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL); 1209 } 1210 if (event & AC_GETDEV_CHANGED) { 1211 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL); 1212 } 1213 } while (--bus >= 0); 1214 return (error); 1215} /* ASR_rescan */ 1216 1217/*-------------------------------------------------------------------------*/ 1218/* Function ASR_reset */ 1219/*-------------------------------------------------------------------------*/ 1220/* The Parameters Passed To This Function Are : */ 1221/* Asr_softc_t * : HBA miniport driver's adapter data storage. 
*/ 1222/* */ 1223/* This Function Will reset the adapter and resynchronize any data */ 1224/* */ 1225/* Return : None */ 1226/*-------------------------------------------------------------------------*/ 1227 1228static int 1229ASR_reset(Asr_softc_t *sc) 1230{ 1231 int s, retVal; 1232 1233 s = splcam(); 1234 if ((sc->ha_in_reset == HA_IN_RESET) 1235 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) { 1236 splx (s); 1237 return (EBUSY); 1238 } 1239 /* 1240 * Promotes HA_OPERATIONAL to HA_IN_RESET, 1241 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY. 1242 */ 1243 ++(sc->ha_in_reset); 1244 if (ASR_resetIOP(sc) == 0) { 1245 debug_asr_printf ("ASR_resetIOP failed\n"); 1246 /* 1247 * We really need to take this card off-line, easier said 1248 * than make sense. Better to keep retrying for now since if a 1249 * UART cable is connected the blinkLEDs the adapter is now in 1250 * a hard state requiring action from the monitor commands to 1251 * the HBA to continue. For debugging waiting forever is a 1252 * good thing. In a production system, however, one may wish 1253 * to instead take the card off-line ... 1254 */ 1255 /* Wait Forever */ 1256 while (ASR_resetIOP(sc) == 0); 1257 } 1258 retVal = ASR_init (sc); 1259 splx (s); 1260 if (retVal != 0) { 1261 debug_asr_printf ("ASR_init failed\n"); 1262 sc->ha_in_reset = HA_OFF_LINE; 1263 return (ENXIO); 1264 } 1265 if (ASR_rescan (sc) != 0) { 1266 debug_asr_printf ("ASR_rescan failed\n"); 1267 } 1268 ASR_failActiveCommands (sc); 1269 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) { 1270 printf ("asr%d: Brining adapter back on-line\n", 1271 sc->ha_path[0] 1272 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1273 : 0); 1274 } 1275 sc->ha_in_reset = HA_OPERATIONAL; 1276 return (0); 1277} /* ASR_reset */ 1278 1279/* 1280 * Device timeout handler. 
1281 */ 1282static void 1283asr_timeout(void *arg) 1284{ 1285 union asr_ccb *ccb = (union asr_ccb *)arg; 1286 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1287 int s; 1288 1289 debug_asr_print_path(ccb); 1290 debug_asr_printf("timed out"); 1291 1292 /* 1293 * Check if the adapter has locked up? 1294 */ 1295 if ((s = ASR_getBlinkLedCode(sc)) != 0) { 1296 /* Reset Adapter */ 1297 printf ("asr%d: Blink LED 0x%x resetting adapter\n", 1298 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s); 1299 if (ASR_reset (sc) == ENXIO) { 1300 /* Try again later */ 1301 ccb->ccb_h.timeout_ch = timeout(asr_timeout, 1302 (caddr_t)ccb, 1303 (ccb->ccb_h.timeout * hz) / 1000); 1304 } 1305 return; 1306 } 1307 /* 1308 * Abort does not function on the ASR card!!! Walking away from 1309 * the SCSI command is also *very* dangerous. A SCSI BUS reset is 1310 * our best bet, followed by a complete adapter reset if that fails. 1311 */ 1312 s = splcam(); 1313 /* Check if we already timed out once to raise the issue */ 1314 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) { 1315 debug_asr_printf (" AGAIN\nreinitializing adapter\n"); 1316 if (ASR_reset (sc) == ENXIO) { 1317 ccb->ccb_h.timeout_ch = timeout(asr_timeout, 1318 (caddr_t)ccb, 1319 (ccb->ccb_h.timeout * hz) / 1000); 1320 } 1321 splx(s); 1322 return; 1323 } 1324 debug_asr_printf ("\nresetting bus\n"); 1325 /* If the BUS reset does not take, then an adapter reset is next! 
*/ 1326 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1327 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1328 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb, 1329 (ccb->ccb_h.timeout * hz) / 1000); 1330 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path))); 1331 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL); 1332 splx(s); 1333} /* asr_timeout */ 1334 1335/* 1336 * send a message asynchronously 1337 */ 1338static int 1339ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message) 1340{ 1341 U32 MessageOffset; 1342 union asr_ccb *ccb; 1343 1344 debug_asr_printf("Host Command Dump:\n"); 1345 debug_asr_dump_message(Message); 1346 1347 ccb = (union asr_ccb *)(long) 1348 I2O_MESSAGE_FRAME_getInitiatorContext64(Message); 1349 1350 if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) { 1351 asr_set_frame(sc, Message, MessageOffset, 1352 I2O_MESSAGE_FRAME_getMessageSize(Message)); 1353 if (ccb) { 1354 ASR_ccbAdd (sc, ccb); 1355 } 1356 /* Post the command */ 1357 asr_set_ToFIFO(sc, MessageOffset); 1358 } else { 1359 if (ASR_getBlinkLedCode(sc)) { 1360 /* 1361 * Unlikely we can do anything if we can't grab a 1362 * message frame :-(, but lets give it a try. 1363 */ 1364 (void)ASR_reset(sc); 1365 } 1366 } 1367 return (MessageOffset); 1368} /* ASR_queue */ 1369 1370 1371/* Simple Scatter Gather elements */ 1372#define SG(SGL,Index,Flags,Buffer,Size) \ 1373 I2O_FLAGS_COUNT_setCount( \ 1374 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1375 Size); \ 1376 I2O_FLAGS_COUNT_setFlags( \ 1377 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1378 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \ 1379 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \ 1380 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \ 1381 (Buffer == NULL) ? 0 : KVTOPHYS(Buffer)) 1382 1383/* 1384 * Retrieve Parameter Group. 
1385 */ 1386static void * 1387ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer, 1388 unsigned BufferSize) 1389{ 1390 struct paramGetMessage { 1391 I2O_UTIL_PARAMS_GET_MESSAGE M; 1392 char 1393 F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)]; 1394 struct Operations { 1395 I2O_PARAM_OPERATIONS_LIST_HEADER Header; 1396 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1]; 1397 } O; 1398 } Message; 1399 struct Operations *Operations_Ptr; 1400 I2O_UTIL_PARAMS_GET_MESSAGE *Message_Ptr; 1401 struct ParamBuffer { 1402 I2O_PARAM_RESULTS_LIST_HEADER Header; 1403 I2O_PARAM_READ_OPERATION_RESULT Read; 1404 char Info[1]; 1405 } *Buffer_Ptr; 1406 1407 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message, 1408 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1409 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1410 Operations_Ptr = (struct Operations *)((char *)Message_Ptr 1411 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1412 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1413 bzero(Operations_Ptr, sizeof(struct Operations)); 1414 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount( 1415 &(Operations_Ptr->Header), 1); 1416 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation( 1417 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET); 1418 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount( 1419 &(Operations_Ptr->Template[0]), 0xFFFF); 1420 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber( 1421 &(Operations_Ptr->Template[0]), Group); 1422 Buffer_Ptr = (struct ParamBuffer *)Buffer; 1423 bzero(Buffer_Ptr, BufferSize); 1424 1425 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1426 I2O_VERSION_11 1427 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1428 / sizeof(U32)) << 4)); 1429 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame), 1430 TID); 1431 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 1432 I2O_UTIL_PARAMS_GET); 1433 /* 1434 * Set up the buffers as scatter 
gather elements. 1435 */ 1436 SG(&(Message_Ptr->SGL), 0, 1437 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, 1438 Operations_Ptr, sizeof(struct Operations)); 1439 SG(&(Message_Ptr->SGL), 1, 1440 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 1441 Buffer_Ptr, BufferSize); 1442 1443 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP) 1444 && (Buffer_Ptr->Header.ResultCount)) { 1445 return ((void *)(Buffer_Ptr->Info)); 1446 } 1447 return (NULL); 1448} /* ASR_getParams */ 1449 1450/* 1451 * Acquire the LCT information. 1452 */ 1453static int 1454ASR_acquireLct(Asr_softc_t *sc) 1455{ 1456 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr; 1457 PI2O_SGE_SIMPLE_ELEMENT sg; 1458 int MessageSizeInBytes; 1459 caddr_t v; 1460 int len; 1461 I2O_LCT Table; 1462 PI2O_LCT_ENTRY Entry; 1463 1464 /* 1465 * sc value assumed valid 1466 */ 1467 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - 1468 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT); 1469 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc( 1470 MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) { 1471 return (ENOMEM); 1472 } 1473 (void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes); 1474 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1475 (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - 1476 sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4))); 1477 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 1478 I2O_EXEC_LCT_NOTIFY); 1479 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr, 1480 I2O_CLASS_MATCH_ANYCLASS); 1481 /* 1482 * Call the LCT table to determine the number of device entries 1483 * to reserve space for. 1484 */ 1485 SG(&(Message_Ptr->SGL), 0, 1486 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table, 1487 sizeof(I2O_LCT)); 1488 /* 1489 * since this code is reused in several systems, code efficiency 1490 * is greater by using a shift operation rather than a divide by 1491 * sizeof(u_int32_t). 
1492 */ 1493 I2O_LCT_setTableSize(&Table, 1494 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2); 1495 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1496 /* 1497 * Determine the size of the LCT table. 1498 */ 1499 if (sc->ha_LCT) { 1500 free(sc->ha_LCT, M_TEMP); 1501 } 1502 /* 1503 * malloc only generates contiguous memory when less than a 1504 * page is expected. We must break the request up into an SG list ... 1505 */ 1506 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <= 1507 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY))) 1508 || (len > (128 * 1024))) { /* Arbitrary */ 1509 free(Message_Ptr, M_TEMP); 1510 return (EINVAL); 1511 } 1512 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) { 1513 free(Message_Ptr, M_TEMP); 1514 return (ENOMEM); 1515 } 1516 /* 1517 * since this code is reused in several systems, code efficiency 1518 * is greater by using a shift operation rather than a divide by 1519 * sizeof(u_int32_t). 1520 */ 1521 I2O_LCT_setTableSize(sc->ha_LCT, 1522 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2); 1523 /* 1524 * Convert the access to the LCT table into a SG list. 
1525 */ 1526 sg = Message_Ptr->SGL.u.Simple; 1527 v = (caddr_t)(sc->ha_LCT); 1528 for (;;) { 1529 int next, base, span; 1530 1531 span = 0; 1532 next = base = KVTOPHYS(v); 1533 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base); 1534 1535 /* How far can we go contiguously */ 1536 while ((len > 0) && (base == next)) { 1537 int size; 1538 1539 next = trunc_page(base) + PAGE_SIZE; 1540 size = next - base; 1541 if (size > len) { 1542 size = len; 1543 } 1544 span += size; 1545 v += size; 1546 len -= size; 1547 base = KVTOPHYS(v); 1548 } 1549 1550 /* Construct the Flags */ 1551 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span); 1552 { 1553 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT; 1554 if (len <= 0) { 1555 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT 1556 | I2O_SGL_FLAGS_LAST_ELEMENT 1557 | I2O_SGL_FLAGS_END_OF_BUFFER); 1558 } 1559 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw); 1560 } 1561 1562 if (len <= 0) { 1563 break; 1564 } 1565 1566 /* 1567 * Incrementing requires resizing of the packet. 
1568 */ 1569 ++sg; 1570 MessageSizeInBytes += sizeof(*sg); 1571 I2O_MESSAGE_FRAME_setMessageSize( 1572 &(Message_Ptr->StdMessageFrame), 1573 I2O_MESSAGE_FRAME_getMessageSize( 1574 &(Message_Ptr->StdMessageFrame)) 1575 + (sizeof(*sg) / sizeof(U32))); 1576 { 1577 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr; 1578 1579 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE) 1580 malloc(MessageSizeInBytes, M_TEMP, M_WAITOK)) 1581 == NULL) { 1582 free(sc->ha_LCT, M_TEMP); 1583 sc->ha_LCT = NULL; 1584 free(Message_Ptr, M_TEMP); 1585 return (ENOMEM); 1586 } 1587 span = ((caddr_t)sg) - (caddr_t)Message_Ptr; 1588 bcopy(Message_Ptr, NewMessage_Ptr, span); 1589 free(Message_Ptr, M_TEMP); 1590 sg = (PI2O_SGE_SIMPLE_ELEMENT) 1591 (((caddr_t)NewMessage_Ptr) + span); 1592 Message_Ptr = NewMessage_Ptr; 1593 } 1594 } 1595 { int retval; 1596 1597 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1598 free(Message_Ptr, M_TEMP); 1599 if (retval != CAM_REQ_CMP) { 1600 return (ENODEV); 1601 } 1602 } 1603 /* If the LCT table grew, lets truncate accesses */ 1604 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) { 1605 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table)); 1606 } 1607 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY) 1608 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 1609 ++Entry) { 1610 Entry->le_type = I2O_UNKNOWN; 1611 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) { 1612 1613 case I2O_CLASS_RANDOM_BLOCK_STORAGE: 1614 Entry->le_type = I2O_BSA; 1615 break; 1616 1617 case I2O_CLASS_SCSI_PERIPHERAL: 1618 Entry->le_type = I2O_SCSI; 1619 break; 1620 1621 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: 1622 Entry->le_type = I2O_FCA; 1623 break; 1624 1625 case I2O_CLASS_BUS_ADAPTER_PORT: 1626 Entry->le_type = I2O_PORT | I2O_SCSI; 1627 /* FALLTHRU */ 1628 case I2O_CLASS_FIBRE_CHANNEL_PORT: 1629 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) == 1630 I2O_CLASS_FIBRE_CHANNEL_PORT) { 1631 Entry->le_type = I2O_PORT | I2O_FCA; 1632 } 
1633 { struct ControllerInfo { 1634 I2O_PARAM_RESULTS_LIST_HEADER Header; 1635 I2O_PARAM_READ_OPERATION_RESULT Read; 1636 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info; 1637 } Buffer; 1638 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info; 1639 1640 Entry->le_bus = 0xff; 1641 Entry->le_target = 0xff; 1642 Entry->le_lun = 0xff; 1643 1644 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR) 1645 ASR_getParams(sc, 1646 I2O_LCT_ENTRY_getLocalTID(Entry), 1647 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO, 1648 &Buffer, sizeof(struct ControllerInfo))) == NULL) { 1649 continue; 1650 } 1651 Entry->le_target 1652 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID( 1653 Info); 1654 Entry->le_lun = 0; 1655 } /* FALLTHRU */ 1656 default: 1657 continue; 1658 } 1659 { struct DeviceInfo { 1660 I2O_PARAM_RESULTS_LIST_HEADER Header; 1661 I2O_PARAM_READ_OPERATION_RESULT Read; 1662 I2O_DPT_DEVICE_INFO_SCALAR Info; 1663 } Buffer; 1664 PI2O_DPT_DEVICE_INFO_SCALAR Info; 1665 1666 Entry->le_bus = 0xff; 1667 Entry->le_target = 0xff; 1668 Entry->le_lun = 0xff; 1669 1670 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR) 1671 ASR_getParams(sc, 1672 I2O_LCT_ENTRY_getLocalTID(Entry), 1673 I2O_DPT_DEVICE_INFO_GROUP_NO, 1674 &Buffer, sizeof(struct DeviceInfo))) == NULL) { 1675 continue; 1676 } 1677 Entry->le_type 1678 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info); 1679 Entry->le_bus 1680 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info); 1681 if ((Entry->le_bus > sc->ha_MaxBus) 1682 && (Entry->le_bus <= MAX_CHANNEL)) { 1683 sc->ha_MaxBus = Entry->le_bus; 1684 } 1685 Entry->le_target 1686 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info); 1687 Entry->le_lun 1688 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info); 1689 } 1690 } 1691 /* 1692 * A zero return value indicates success. 1693 */ 1694 return (0); 1695} /* ASR_acquireLct */ 1696 1697/* 1698 * Initialize a message frame. 1699 * We assume that the CDB has already been set up, so all we do here is 1700 * generate the Scatter Gather list. 
1701 */ 1702static PI2O_MESSAGE_FRAME 1703ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message) 1704{ 1705 PI2O_MESSAGE_FRAME Message_Ptr; 1706 PI2O_SGE_SIMPLE_ELEMENT sg; 1707 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1708 vm_size_t size, len; 1709 caddr_t v; 1710 U32 MessageSize; 1711 int next, span, base, rw; 1712 int target = ccb->ccb_h.target_id; 1713 int lun = ccb->ccb_h.target_lun; 1714 int bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 1715 tid_t TID; 1716 1717 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */ 1718 Message_Ptr = (I2O_MESSAGE_FRAME *)Message; 1719 bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 1720 sizeof(I2O_SG_ELEMENT))); 1721 1722 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) { 1723 PI2O_LCT_ENTRY Device; 1724 1725 TID = 0; 1726 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 1727 (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT)); 1728 ++Device) { 1729 if ((Device->le_type != I2O_UNKNOWN) 1730 && (Device->le_bus == bus) 1731 && (Device->le_target == target) 1732 && (Device->le_lun == lun) 1733 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) { 1734 TID = I2O_LCT_ENTRY_getLocalTID(Device); 1735 ASR_setTid(sc, Device->le_bus, 1736 Device->le_target, Device->le_lun, 1737 TID); 1738 break; 1739 } 1740 } 1741 } 1742 if (TID == (tid_t)0) { 1743 return (NULL); 1744 } 1745 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID); 1746 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID( 1747 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID); 1748 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 | 1749 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1750 / sizeof(U32)) << 4)); 1751 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 1752 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 1753 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)); 1754 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 1755 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, 
I2O_PRIVATE_MESSAGE); 1756 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 1757 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC); 1758 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 1759 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 1760 I2O_SCB_FLAG_ENABLE_DISCONNECT 1761 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 1762 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 1763 /* 1764 * We do not need any (optional byteswapping) method access to 1765 * the Initiator & Transaction context field. 1766 */ 1767 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb); 1768 1769 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 1770 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID); 1771 /* 1772 * copy the cdb over 1773 */ 1774 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength( 1775 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len); 1776 bcopy(&(ccb->csio.cdb_io), 1777 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, 1778 ccb->csio.cdb_len); 1779 1780 /* 1781 * Given a buffer describing a transfer, set up a scatter/gather map 1782 * in a ccb to map that SCSI transfer. 1783 */ 1784 1785 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR; 1786 1787 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 1788 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 1789 (ccb->csio.dxfer_len) 1790 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE 1791 | I2O_SCB_FLAG_ENABLE_DISCONNECT 1792 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 1793 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER) 1794 : (I2O_SCB_FLAG_XFER_FROM_DEVICE 1795 | I2O_SCB_FLAG_ENABLE_DISCONNECT 1796 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 1797 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)) 1798 : (I2O_SCB_FLAG_ENABLE_DISCONNECT 1799 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 1800 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 1801 1802 /* 1803 * Given a transfer described by a `data', fill in the SG list. 
1804 */ 1805 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0]; 1806 1807 len = ccb->csio.dxfer_len; 1808 v = ccb->csio.data_ptr; 1809 KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0")); 1810 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr); 1811 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 1812 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len); 1813 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 1814 Message_Ptr)->SGL.u.Simple[SG_SIZE])) { 1815 span = 0; 1816 next = base = KVTOPHYS(v); 1817 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base); 1818 1819 /* How far can we go contiguously */ 1820 while ((len > 0) && (base == next)) { 1821 next = trunc_page(base) + PAGE_SIZE; 1822 size = next - base; 1823 if (size > len) { 1824 size = len; 1825 } 1826 span += size; 1827 v += size; 1828 len -= size; 1829 base = KVTOPHYS(v); 1830 } 1831 1832 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span); 1833 if (len == 0) { 1834 rw |= I2O_SGL_FLAGS_LAST_ELEMENT; 1835 } 1836 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), 1837 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw); 1838 ++sg; 1839 MessageSize += sizeof(*sg) / sizeof(U32); 1840 } 1841 /* We always do the request sense ... */ 1842 if ((span = ccb->csio.sense_len) == 0) { 1843 span = sizeof(ccb->csio.sense_data); 1844 } 1845 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 1846 &(ccb->csio.sense_data), span); 1847 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 1848 MessageSize + (sizeof(*sg) / sizeof(U32))); 1849 return (Message_Ptr); 1850} /* ASR_init_message */ 1851 1852/* 1853 * Reset the adapter. 1854 */ 1855static U32 1856ASR_initOutBound(Asr_softc_t *sc) 1857{ 1858 struct initOutBoundMessage { 1859 I2O_EXEC_OUTBOUND_INIT_MESSAGE M; 1860 U32 R; 1861 } Message; 1862 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr; 1863 U32 *volatile Reply_Ptr; 1864 U32 Old; 1865 1866 /* 1867 * Build up our copy of the Message. 
1868 */ 1869 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message, 1870 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE)); 1871 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 1872 I2O_EXEC_OUTBOUND_INIT); 1873 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE); 1874 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr, 1875 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)); 1876 /* 1877 * Reset the Reply Status 1878 */ 1879 *(Reply_Ptr = (U32 *)((char *)Message_Ptr 1880 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0; 1881 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr, 1882 sizeof(U32)); 1883 /* 1884 * Send the Message out 1885 */ 1886 if ((Old = ASR_initiateCp(sc, 1887 (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) { 1888 u_long size, addr; 1889 1890 /* 1891 * Wait for a response (Poll). 1892 */ 1893 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED); 1894 /* 1895 * Re-enable the interrupts. 1896 */ 1897 asr_set_intr(sc, Old); 1898 /* 1899 * Populate the outbound table. 1900 */ 1901 if (sc->ha_Msgs == NULL) { 1902 1903 /* Allocate the reply frames */ 1904 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 1905 * sc->ha_Msgs_Count; 1906 1907 /* 1908 * contigmalloc only works reliably at 1909 * initialization time. 
1910 */ 1911 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 1912 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul, 1913 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) { 1914 bzero(sc->ha_Msgs, size); 1915 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs); 1916 } 1917 } 1918 1919 /* Initialize the outbound FIFO */ 1920 if (sc->ha_Msgs != NULL) 1921 for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys; 1922 size; --size) { 1923 asr_set_FromFIFO(sc, addr); 1924 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME); 1925 } 1926 return (*Reply_Ptr); 1927 } 1928 return (0); 1929} /* ASR_initOutBound */ 1930 1931/* 1932 * Set the system table 1933 */ 1934static int 1935ASR_setSysTab(Asr_softc_t *sc) 1936{ 1937 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr; 1938 PI2O_SET_SYSTAB_HEADER SystemTable; 1939 Asr_softc_t * ha; 1940 PI2O_SGE_SIMPLE_ELEMENT sg; 1941 int retVal; 1942 1943 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc ( 1944 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) { 1945 return (ENOMEM); 1946 } 1947 for (ha = Asr_softc; ha; ha = ha->ha_next) { 1948 ++SystemTable->NumberEntries; 1949 } 1950 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc ( 1951 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 1952 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)), 1953 M_TEMP, M_WAITOK)) == NULL) { 1954 free(SystemTable, M_TEMP); 1955 return (ENOMEM); 1956 } 1957 (void)ASR_fillMessage((void *)Message_Ptr, 1958 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 1959 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT))); 1960 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1961 (I2O_VERSION_11 + 1962 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1963 / sizeof(U32)) << 4))); 1964 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 1965 I2O_EXEC_SYS_TAB_SET); 1966 /* 1967 * Call the LCT table to determine the number of device entries 1968 * to 
reserve space for. 1969 * since this code is reused in several systems, code efficiency 1970 * is greater by using a shift operation rather than a divide by 1971 * sizeof(u_int32_t). 1972 */ 1973 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr 1974 + ((I2O_MESSAGE_FRAME_getVersionOffset( 1975 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2)); 1976 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER)); 1977 ++sg; 1978 for (ha = Asr_softc; ha; ha = ha->ha_next) { 1979 SG(sg, 0, 1980 ((ha->ha_next) 1981 ? (I2O_SGL_FLAGS_DIR) 1982 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)), 1983 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable)); 1984 ++sg; 1985 } 1986 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 1987 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT 1988 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0); 1989 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1990 free(Message_Ptr, M_TEMP); 1991 free(SystemTable, M_TEMP); 1992 return (retVal); 1993} /* ASR_setSysTab */ 1994 1995static int 1996ASR_acquireHrt(Asr_softc_t *sc) 1997{ 1998 I2O_EXEC_HRT_GET_MESSAGE Message; 1999 I2O_EXEC_HRT_GET_MESSAGE *Message_Ptr; 2000 struct { 2001 I2O_HRT Header; 2002 I2O_HRT_ENTRY Entry[MAX_CHANNEL]; 2003 } Hrt; 2004 u_int8_t NumberOfEntries; 2005 PI2O_HRT_ENTRY Entry; 2006 2007 bzero(&Hrt, sizeof (Hrt)); 2008 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message, 2009 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2010 + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2011 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 2012 (I2O_VERSION_11 2013 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 2014 / sizeof(U32)) << 4))); 2015 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame), 2016 I2O_EXEC_HRT_GET); 2017 2018 /* 2019 * Set up the buffers as scatter gather elements. 
2020 */ 2021 SG(&(Message_Ptr->SGL), 0, 2022 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2023 &Hrt, sizeof(Hrt)); 2024 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) { 2025 return (ENODEV); 2026 } 2027 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header)) 2028 > (MAX_CHANNEL + 1)) { 2029 NumberOfEntries = MAX_CHANNEL + 1; 2030 } 2031 for (Entry = Hrt.Header.HRTEntry; 2032 NumberOfEntries != 0; 2033 ++Entry, --NumberOfEntries) { 2034 PI2O_LCT_ENTRY Device; 2035 2036 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2037 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2038 ++Device) { 2039 if (I2O_LCT_ENTRY_getLocalTID(Device) 2040 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) { 2041 Device->le_bus = I2O_HRT_ENTRY_getAdapterID( 2042 Entry) >> 16; 2043 if ((Device->le_bus > sc->ha_MaxBus) 2044 && (Device->le_bus <= MAX_CHANNEL)) { 2045 sc->ha_MaxBus = Device->le_bus; 2046 } 2047 } 2048 } 2049 } 2050 return (0); 2051} /* ASR_acquireHrt */ 2052 2053/* 2054 * Enable the adapter. 2055 */ 2056static int 2057ASR_enableSys(Asr_softc_t *sc) 2058{ 2059 I2O_EXEC_SYS_ENABLE_MESSAGE Message; 2060 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr; 2061 2062 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message, 2063 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE)); 2064 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2065 I2O_EXEC_SYS_ENABLE); 2066 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0); 2067} /* ASR_enableSys */ 2068 2069/* 2070 * Perform the stages necessary to initialize the adapter 2071 */ 2072static int 2073ASR_init(Asr_softc_t *sc) 2074{ 2075 return ((ASR_initOutBound(sc) == 0) 2076 || (ASR_setSysTab(sc) != CAM_REQ_CMP) 2077 || (ASR_enableSys(sc) != CAM_REQ_CMP)); 2078} /* ASR_init */ 2079 2080/* 2081 * Send a Synchronize Cache command to the target device. 
2082 */ 2083static void 2084ASR_sync(Asr_softc_t *sc, int bus, int target, int lun) 2085{ 2086 tid_t TID; 2087 2088 /* 2089 * We will not synchronize the device when there are outstanding 2090 * commands issued by the OS (this is due to a locked up device, 2091 * as the OS normally would flush all outstanding commands before 2092 * issuing a shutdown or an adapter reset). 2093 */ 2094 if ((sc != NULL) 2095 && (LIST_FIRST(&(sc->ha_ccb)) != NULL) 2096 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2097 && (TID != (tid_t)0)) { 2098 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message; 2099 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2100 2101 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message; 2102 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2103 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2104 2105 I2O_MESSAGE_FRAME_setVersionOffset( 2106 (PI2O_MESSAGE_FRAME)Message_Ptr, 2107 I2O_VERSION_11 2108 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2109 - sizeof(I2O_SG_ELEMENT)) 2110 / sizeof(U32)) << 4)); 2111 I2O_MESSAGE_FRAME_setMessageSize( 2112 (PI2O_MESSAGE_FRAME)Message_Ptr, 2113 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2114 - sizeof(I2O_SG_ELEMENT)) 2115 / sizeof(U32)); 2116 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2117 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2118 I2O_MESSAGE_FRAME_setFunction( 2119 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2120 I2O_MESSAGE_FRAME_setTargetAddress( 2121 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2122 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2123 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2124 I2O_SCSI_SCB_EXEC); 2125 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2126 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2127 I2O_SCB_FLAG_ENABLE_DISCONNECT 2128 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2129 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2130 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2131 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2132 DPT_ORGANIZATION_ID); 2133 
PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2134 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2135 Message_Ptr->CDB[1] = (lun << 5); 2136 2137 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2138 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2139 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2140 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2141 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2142 2143 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2144 2145 } 2146} 2147 2148static void 2149ASR_synchronize(Asr_softc_t *sc) 2150{ 2151 int bus, target, lun; 2152 2153 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2154 for (target = 0; target <= sc->ha_MaxId; ++target) { 2155 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2156 ASR_sync(sc,bus,target,lun); 2157 } 2158 } 2159 } 2160} 2161 2162/* 2163 * Reset the HBA, targets and BUS. 2164 * Currently this resets *all* the SCSI busses. 2165 */ 2166static __inline void 2167asr_hbareset(Asr_softc_t *sc) 2168{ 2169 ASR_synchronize(sc); 2170 (void)ASR_reset(sc); 2171} /* asr_hbareset */ 2172 2173/* 2174 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2175 * limit and a reduction in error checking (in the pre 4.0 case). 2176 */ 2177static int 2178asr_pci_map_mem(device_t tag, Asr_softc_t *sc) 2179{ 2180 int rid; 2181 u_int32_t p, l, s; 2182 2183 /* 2184 * I2O specification says we must find first *memory* mapped BAR 2185 */ 2186 for (rid = 0; rid < 4; rid++) { 2187 p = pci_read_config(tag, PCIR_BAR(rid), sizeof(p)); 2188 if ((p & 1) == 0) { 2189 break; 2190 } 2191 } 2192 /* 2193 * Give up? 2194 */ 2195 if (rid >= 4) { 2196 rid = 0; 2197 } 2198 rid = PCIR_BAR(rid); 2199 p = pci_read_config(tag, rid, sizeof(p)); 2200 pci_write_config(tag, rid, -1, sizeof(p)); 2201 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2202 pci_write_config(tag, rid, p, sizeof(p)); 2203 if (l > MAX_MAP) { 2204 l = MAX_MAP; 2205 } 2206 /* 2207 * The 2005S Zero Channel RAID solution is not a perfect PCI 2208 * citizen. 
It asks for 4MB on BAR0, and 0MB on BAR1, once 2209 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2210 * BAR0+2MB and sets it's size to 2MB. The IOP registers are 2211 * accessible via BAR0, the messaging registers are accessible 2212 * via BAR1. If the subdevice code is 50 to 59 decimal. 2213 */ 2214 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2215 if (s != 0xA5111044) { 2216 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s)); 2217 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2218 && (ADPTDOMINATOR_SUB_ID_START <= s) 2219 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2220 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */ 2221 } 2222 } 2223 p &= ~15; 2224 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2225 p, p + l, l, RF_ACTIVE); 2226 if (sc->ha_mem_res == NULL) { 2227 return (0); 2228 } 2229 sc->ha_Base = rman_get_start(sc->ha_mem_res); 2230 sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res); 2231 sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res); 2232 2233 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */ 2234 if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) { 2235 return (0); 2236 } 2237 p = pci_read_config(tag, rid, sizeof(p)); 2238 pci_write_config(tag, rid, -1, sizeof(p)); 2239 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2240 pci_write_config(tag, rid, p, sizeof(p)); 2241 if (l > MAX_MAP) { 2242 l = MAX_MAP; 2243 } 2244 p &= ~15; 2245 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2246 p, p + l, l, RF_ACTIVE); 2247 if (sc->ha_mes_res == NULL) { 2248 return (0); 2249 } 2250 sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res); 2251 sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res); 2252 } else { 2253 sc->ha_frame_bhandle = sc->ha_i2o_bhandle; 2254 sc->ha_frame_btag = sc->ha_i2o_btag; 2255 } 2256 return (1); 2257} /* asr_pci_map_mem */ 2258 2259/* 2260 * A simplified copy of the real pci_map_int with additional 2261 * registration requirements. 
2262 */ 2263static int 2264asr_pci_map_int(device_t tag, Asr_softc_t *sc) 2265{ 2266 int rid = 0; 2267 2268 sc->ha_irq_res = bus_alloc_resource_any(tag, SYS_RES_IRQ, &rid, 2269 RF_ACTIVE | RF_SHAREABLE); 2270 if (sc->ha_irq_res == NULL) { 2271 return (0); 2272 } 2273 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY, 2274 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) { 2275 return (0); 2276 } 2277 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2278 return (1); 2279} /* asr_pci_map_int */ 2280 2281/* 2282 * Attach the devices, and virtual devices to the driver list. 2283 */ 2284static int 2285asr_attach(device_t tag) 2286{ 2287 PI2O_EXEC_STATUS_GET_REPLY status; 2288 PI2O_LCT_ENTRY Device; 2289 Asr_softc_t *sc, **ha; 2290 struct scsi_inquiry_data *iq; 2291 union asr_ccb *ccb; 2292 int bus, size, unit = device_get_unit(tag); 2293 2294 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { 2295 return(ENOMEM); 2296 } 2297 if (Asr_softc == NULL) { 2298 /* 2299 * Fixup the OS revision as saved in the dptsig for the 2300 * engine (dptioctl.h) to pick up. 2301 */ 2302 bcopy(osrelease, &ASR_sig.dsDescription[16], 5); 2303 } 2304 /* 2305 * Initialize the software structure 2306 */ 2307 LIST_INIT(&(sc->ha_ccb)); 2308 /* Link us into the HA list */ 2309 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2310 *(ha) = sc; 2311 2312 /* 2313 * This is the real McCoy! 
2314 */ 2315 if (!asr_pci_map_mem(tag, sc)) { 2316 printf ("asr%d: could not map memory\n", unit); 2317 return(ENXIO); 2318 } 2319 /* Enable if not formerly enabled */ 2320 pci_write_config(tag, PCIR_COMMAND, 2321 pci_read_config(tag, PCIR_COMMAND, sizeof(char)) | 2322 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2323 /* Knowledge is power, responsibility is direct */ 2324 { 2325 struct pci_devinfo { 2326 STAILQ_ENTRY(pci_devinfo) pci_links; 2327 struct resource_list resources; 2328 pcicfgregs cfg; 2329 } * dinfo = device_get_ivars(tag); 2330 sc->ha_pciBusNum = dinfo->cfg.bus; 2331 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) | dinfo->cfg.func; 2332 } 2333 /* Check if the device is there? */ 2334 if ((ASR_resetIOP(sc) == 0) || 2335 ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc( 2336 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) == NULL) || 2337 (ASR_getStatus(sc, status) == NULL)) { 2338 printf ("asr%d: could not initialize hardware\n", unit); 2339 return(ENODEV); /* Get next, maybe better luck */ 2340 } 2341 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2342 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2343 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2344 sc->ha_SystemTable.IopState = status->IopState; 2345 sc->ha_SystemTable.MessengerType = status->MessengerType; 2346 sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize; 2347 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow = 2348 (U32)(sc->ha_Base + I2O_REG_TOFIFO); /* XXX 64-bit */ 2349 2350 if (!asr_pci_map_int(tag, (void *)sc)) { 2351 printf ("asr%d: could not map interrupt\n", unit); 2352 return(ENXIO); 2353 } 2354 2355 /* Adjust the maximim inbound count */ 2356 if (((sc->ha_QueueSize = 2357 I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) > 2358 MAX_INBOUND) || (sc->ha_QueueSize == 0)) { 2359 sc->ha_QueueSize = MAX_INBOUND; 2360 } 2361 2362 /* Adjust the maximum outbound count */ 2363 if (((sc->ha_Msgs_Count = 2364 
I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) > 2365 MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) { 2366 sc->ha_Msgs_Count = MAX_OUTBOUND; 2367 } 2368 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2369 sc->ha_Msgs_Count = sc->ha_QueueSize; 2370 } 2371 2372 /* Adjust the maximum SG size to adapter */ 2373 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) << 2374 2)) > MAX_INBOUND_SIZE) { 2375 size = MAX_INBOUND_SIZE; 2376 } 2377 free(status, M_TEMP); 2378 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2379 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT); 2380 2381 /* 2382 * Only do a bus/HBA reset on the first time through. On this 2383 * first time through, we do not send a flush to the devices. 2384 */ 2385 if (ASR_init(sc) == 0) { 2386 struct BufferInfo { 2387 I2O_PARAM_RESULTS_LIST_HEADER Header; 2388 I2O_PARAM_READ_OPERATION_RESULT Read; 2389 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2390 } Buffer; 2391 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2392#define FW_DEBUG_BLED_OFFSET 8 2393 2394 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2395 ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2396 &Buffer, sizeof(struct BufferInfo))) != NULL) { 2397 sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET + 2398 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info); 2399 } 2400 if (ASR_acquireLct(sc) == 0) { 2401 (void)ASR_acquireHrt(sc); 2402 } 2403 } else { 2404 printf ("asr%d: failed to initialize\n", unit); 2405 return(ENXIO); 2406 } 2407 /* 2408 * Add in additional probe responses for more channels. We 2409 * are reusing the variable `target' for a channel loop counter. 2410 * Done here because of we need both the acquireLct and 2411 * acquireHrt data. 
2412 */ 2413 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2414 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) { 2415 if (Device->le_type == I2O_UNKNOWN) { 2416 continue; 2417 } 2418 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2419 if (Device->le_target > sc->ha_MaxId) { 2420 sc->ha_MaxId = Device->le_target; 2421 } 2422 if (Device->le_lun > sc->ha_MaxLun) { 2423 sc->ha_MaxLun = Device->le_lun; 2424 } 2425 } 2426 if (((Device->le_type & I2O_PORT) != 0) 2427 && (Device->le_bus <= MAX_CHANNEL)) { 2428 /* Do not increase MaxId for efficiency */ 2429 sc->ha_adapter_target[Device->le_bus] = 2430 Device->le_target; 2431 } 2432 } 2433 2434 /* 2435 * Print the HBA model number as inquired from the card. 2436 */ 2437 2438 printf("asr%d:", unit); 2439 2440 if ((iq = (struct scsi_inquiry_data *)malloc( 2441 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) != 2442 NULL) { 2443 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message; 2444 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2445 int posted = 0; 2446 2447 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message; 2448 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 2449 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2450 2451 I2O_MESSAGE_FRAME_setVersionOffset( 2452 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 | 2453 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2454 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)); 2455 I2O_MESSAGE_FRAME_setMessageSize( 2456 (PI2O_MESSAGE_FRAME)Message_Ptr, 2457 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 2458 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) / 2459 sizeof(U32)); 2460 I2O_MESSAGE_FRAME_setInitiatorAddress( 2461 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2462 I2O_MESSAGE_FRAME_setFunction( 2463 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2464 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode( 2465 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC); 2466 
PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2467 I2O_SCB_FLAG_ENABLE_DISCONNECT 2468 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2469 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2470 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2471 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2472 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2473 DPT_ORGANIZATION_ID); 2474 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2475 Message_Ptr->CDB[0] = INQUIRY; 2476 Message_Ptr->CDB[4] = 2477 (unsigned char)sizeof(struct scsi_inquiry_data); 2478 if (Message_Ptr->CDB[4] == 0) { 2479 Message_Ptr->CDB[4] = 255; 2480 } 2481 2482 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2483 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2484 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2485 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2486 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2487 2488 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2489 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2490 sizeof(struct scsi_inquiry_data)); 2491 SG(&(Message_Ptr->SGL), 0, 2492 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2493 iq, sizeof(struct scsi_inquiry_data)); 2494 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2495 2496 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2497 printf (" "); 2498 ASR_prstring (iq->vendor, 8); 2499 ++posted; 2500 } 2501 if (iq->product[0] && (iq->product[0] != ' ')) { 2502 printf (" "); 2503 ASR_prstring (iq->product, 16); 2504 ++posted; 2505 } 2506 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2507 printf (" FW Rev. "); 2508 ASR_prstring (iq->revision, 4); 2509 ++posted; 2510 } 2511 free(iq, M_TEMP); 2512 if (posted) { 2513 printf (","); 2514 } 2515 } 2516 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2517 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize); 2518 2519 /* 2520 * fill in the prototype cam_path. 
2521 */ 2522 if ((ccb = asr_alloc_ccb(sc)) == NULL) { 2523 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit); 2524 return(ENOMEM); 2525 } 2526 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2527 struct cam_devq * devq; 2528 int QueueSize = sc->ha_QueueSize; 2529 2530 if (QueueSize > MAX_INBOUND) { 2531 QueueSize = MAX_INBOUND; 2532 } 2533 2534 /* 2535 * Create the device queue for our SIM(s). 2536 */ 2537 if ((devq = cam_simq_alloc(QueueSize)) == NULL) { 2538 continue; 2539 } 2540 2541 /* 2542 * Construct our first channel SIM entry 2543 */ 2544 sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc, 2545 unit, 1, QueueSize, devq); 2546 if (sc->ha_sim[bus] == NULL) { 2547 continue; 2548 } 2549 2550 if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS) { 2551 cam_sim_free(sc->ha_sim[bus], 2552 /*free_devq*/TRUE); 2553 sc->ha_sim[bus] = NULL; 2554 continue; 2555 } 2556 2557 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL, 2558 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD, 2559 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2560 xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus])); 2561 cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE); 2562 sc->ha_sim[bus] = NULL; 2563 continue; 2564 } 2565 } 2566 asr_free_ccb(ccb); 2567 /* 2568 * Generate the device node information 2569 */ 2570 sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, 2571 "asr%d", unit); 2572 sc->ha_devt->si_drv1 = sc; 2573 return(0); 2574} /* asr_attach */ 2575 2576static void 2577asr_poll(struct cam_sim *sim) 2578{ 2579 asr_intr(cam_sim_softc(sim)); 2580} /* asr_poll */ 2581 2582static void 2583asr_action(struct cam_sim *sim, union ccb *ccb) 2584{ 2585 struct Asr_softc *sc; 2586 2587 debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, 2588 ccb->ccb_h.func_code); 2589 2590 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n")); 2591 2592 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim); 
2593 2594 switch (ccb->ccb_h.func_code) { 2595 2596 /* Common cases first */ 2597 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2598 { 2599 struct Message { 2600 char M[MAX_INBOUND_SIZE]; 2601 } Message; 2602 PI2O_MESSAGE_FRAME Message_Ptr; 2603 2604 /* Reject incoming commands while we are resetting the card */ 2605 if (sc->ha_in_reset != HA_OPERATIONAL) { 2606 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2607 if (sc->ha_in_reset >= HA_OFF_LINE) { 2608 /* HBA is now off-line */ 2609 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 2610 } else { 2611 /* HBA currently resetting, try again later. */ 2612 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2613 } 2614 debug_asr_cmd_printf (" e\n"); 2615 xpt_done(ccb); 2616 debug_asr_cmd_printf (" q\n"); 2617 break; 2618 } 2619 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2620 printf( 2621 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n", 2622 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), 2623 ccb->csio.cdb_io.cdb_bytes[0], 2624 cam_sim_bus(sim), 2625 ccb->ccb_h.target_id, 2626 ccb->ccb_h.target_lun); 2627 } 2628 debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim), 2629 cam_sim_bus(sim), ccb->ccb_h.target_id, 2630 ccb->ccb_h.target_lun); 2631 debug_asr_dump_ccb(ccb); 2632 2633 if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb, 2634 (PI2O_MESSAGE_FRAME)&Message)) != NULL) { 2635 debug_asr_cmd2_printf ("TID=%x:\n", 2636 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID( 2637 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)); 2638 debug_asr_cmd2_dump_message(Message_Ptr); 2639 debug_asr_cmd1_printf (" q"); 2640 2641 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) { 2642 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2643 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2644 debug_asr_cmd_printf (" E\n"); 2645 xpt_done(ccb); 2646 } 2647 debug_asr_cmd_printf(" Q\n"); 2648 break; 2649 } 2650 /* 2651 * We will get here if there is no valid TID for the device 2652 * referenced in the scsi command packet. 
2653 */ 2654 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2655 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2656 debug_asr_cmd_printf (" B\n"); 2657 xpt_done(ccb); 2658 break; 2659 } 2660 2661 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2662 /* Rese HBA device ... */ 2663 asr_hbareset (sc); 2664 ccb->ccb_h.status = CAM_REQ_CMP; 2665 xpt_done(ccb); 2666 break; 2667 2668#if (defined(REPORT_LUNS)) 2669 case REPORT_LUNS: 2670#endif 2671 case XPT_ABORT: /* Abort the specified CCB */ 2672 /* XXX Implement */ 2673 ccb->ccb_h.status = CAM_REQ_INVALID; 2674 xpt_done(ccb); 2675 break; 2676 2677 case XPT_SET_TRAN_SETTINGS: 2678 /* XXX Implement */ 2679 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2680 xpt_done(ccb); 2681 break; 2682 2683 case XPT_GET_TRAN_SETTINGS: 2684 /* Get default/user set transfer settings for the target */ 2685 { 2686 struct ccb_trans_settings *cts; 2687 u_int target_mask; 2688 2689 cts = &(ccb->cts); 2690 target_mask = 0x01 << ccb->ccb_h.target_id; 2691 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 2692 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB; 2693 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2694 cts->sync_period = 6; /* 40MHz */ 2695 cts->sync_offset = 15; 2696 2697 cts->valid = CCB_TRANS_SYNC_RATE_VALID 2698 | CCB_TRANS_SYNC_OFFSET_VALID 2699 | CCB_TRANS_BUS_WIDTH_VALID 2700 | CCB_TRANS_DISC_VALID 2701 | CCB_TRANS_TQ_VALID; 2702 ccb->ccb_h.status = CAM_REQ_CMP; 2703 } else { 2704 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2705 } 2706 xpt_done(ccb); 2707 break; 2708 } 2709 2710 case XPT_CALC_GEOMETRY: 2711 { 2712 struct ccb_calc_geometry *ccg; 2713 u_int32_t size_mb; 2714 u_int32_t secs_per_cylinder; 2715 2716 ccg = &(ccb->ccg); 2717 size_mb = ccg->volume_size 2718 / ((1024L * 1024L) / ccg->block_size); 2719 2720 if (size_mb > 4096) { 2721 ccg->heads = 255; 2722 ccg->secs_per_track = 63; 2723 } else if (size_mb > 2048) { 2724 ccg->heads = 128; 2725 ccg->secs_per_track = 63; 2726 } else if (size_mb > 1024) { 2727 ccg->heads = 65; 2728 
ccg->secs_per_track = 63; 2729 } else { 2730 ccg->heads = 64; 2731 ccg->secs_per_track = 32; 2732 } 2733 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2734 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2735 ccb->ccb_h.status = CAM_REQ_CMP; 2736 xpt_done(ccb); 2737 break; 2738 } 2739 2740 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2741 ASR_resetBus (sc, cam_sim_bus(sim)); 2742 ccb->ccb_h.status = CAM_REQ_CMP; 2743 xpt_done(ccb); 2744 break; 2745 2746 case XPT_TERM_IO: /* Terminate the I/O process */ 2747 /* XXX Implement */ 2748 ccb->ccb_h.status = CAM_REQ_INVALID; 2749 xpt_done(ccb); 2750 break; 2751 2752 case XPT_PATH_INQ: /* Path routing inquiry */ 2753 { 2754 struct ccb_pathinq *cpi = &(ccb->cpi); 2755 2756 cpi->version_num = 1; /* XXX??? */ 2757 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2758 cpi->target_sprt = 0; 2759 /* Not necessary to reset bus, done by HDM initialization */ 2760 cpi->hba_misc = PIM_NOBUSRESET; 2761 cpi->hba_eng_cnt = 0; 2762 cpi->max_target = sc->ha_MaxId; 2763 cpi->max_lun = sc->ha_MaxLun; 2764 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)]; 2765 cpi->bus_id = cam_sim_bus(sim); 2766 cpi->base_transfer_speed = 3300; 2767 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2768 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 2769 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2770 cpi->unit_number = cam_sim_unit(sim); 2771 cpi->ccb_h.status = CAM_REQ_CMP; 2772 xpt_done(ccb); 2773 break; 2774 } 2775 default: 2776 ccb->ccb_h.status = CAM_REQ_INVALID; 2777 xpt_done(ccb); 2778 break; 2779 } 2780} /* asr_action */ 2781 2782/* 2783 * Handle processing of current CCB as pointed to by the Status. 
2784 */ 2785static int 2786asr_intr(Asr_softc_t *sc) 2787{ 2788 int processed; 2789 2790 for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled; 2791 processed = 1) { 2792 union asr_ccb *ccb; 2793 U32 ReplyOffset; 2794 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply; 2795 2796 if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE) 2797 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) { 2798 break; 2799 } 2800 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset 2801 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs)); 2802 /* 2803 * We do not need any (optional byteswapping) method access to 2804 * the Initiator context field. 2805 */ 2806 ccb = (union asr_ccb *)(long) 2807 I2O_MESSAGE_FRAME_getInitiatorContext64( 2808 &(Reply->StdReplyFrame.StdMessageFrame)); 2809 if (I2O_MESSAGE_FRAME_getMsgFlags( 2810 &(Reply->StdReplyFrame.StdMessageFrame)) 2811 & I2O_MESSAGE_FLAGS_FAIL) { 2812 I2O_UTIL_NOP_MESSAGE Message; 2813 PI2O_UTIL_NOP_MESSAGE Message_Ptr; 2814 U32 MessageOffset; 2815 2816 MessageOffset = (u_long) 2817 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA( 2818 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply); 2819 /* 2820 * Get the Original Message Frame's address, and get 2821 * it's Transaction Context into our space. (Currently 2822 * unused at original authorship, but better to be 2823 * safe than sorry). Straight copy means that we 2824 * need not concern ourselves with the (optional 2825 * byteswapping) method access. 2826 */ 2827 Reply->StdReplyFrame.TransactionContext = 2828 bus_space_read_4(sc->ha_frame_btag, 2829 sc->ha_frame_bhandle, MessageOffset + 2830 offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME, 2831 TransactionContext)); 2832 /* 2833 * For 64 bit machines, we need to reconstruct the 2834 * 64 bit context. 2835 */ 2836 ccb = (union asr_ccb *)(long) 2837 I2O_MESSAGE_FRAME_getInitiatorContext64( 2838 &(Reply->StdReplyFrame.StdMessageFrame)); 2839 /* 2840 * Unique error code for command failure. 
2841 */ 2842 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode( 2843 &(Reply->StdReplyFrame), (u_int16_t)-2); 2844 /* 2845 * Modify the message frame to contain a NOP and 2846 * re-issue it to the controller. 2847 */ 2848 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage( 2849 &Message, sizeof(I2O_UTIL_NOP_MESSAGE)); 2850#if (I2O_UTIL_NOP != 0) 2851 I2O_MESSAGE_FRAME_setFunction ( 2852 &(Message_Ptr->StdMessageFrame), 2853 I2O_UTIL_NOP); 2854#endif 2855 /* 2856 * Copy the packet out to the Original Message 2857 */ 2858 asr_set_frame(sc, Message_Ptr, MessageOffset, 2859 sizeof(I2O_UTIL_NOP_MESSAGE)); 2860 /* 2861 * Issue the NOP 2862 */ 2863 asr_set_ToFIFO(sc, MessageOffset); 2864 } 2865 2866 /* 2867 * Asynchronous command with no return requirements, 2868 * and a generic handler for immunity against odd error 2869 * returns from the adapter. 2870 */ 2871 if (ccb == NULL) { 2872 /* 2873 * Return Reply so that it can be used for the 2874 * next command 2875 */ 2876 asr_set_FromFIFO(sc, ReplyOffset); 2877 continue; 2878 } 2879 2880 /* Welease Wadjah! 
(and stop timeouts) */ 2881 ASR_ccbRemove (sc, ccb); 2882 2883 switch ( 2884 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode( 2885 &(Reply->StdReplyFrame))) { 2886 2887 case I2O_SCSI_DSC_SUCCESS: 2888 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2889 ccb->ccb_h.status |= CAM_REQ_CMP; 2890 break; 2891 2892 case I2O_SCSI_DSC_CHECK_CONDITION: 2893 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2894 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID; 2895 break; 2896 2897 case I2O_SCSI_DSC_BUSY: 2898 /* FALLTHRU */ 2899 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY: 2900 /* FALLTHRU */ 2901 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET: 2902 /* FALLTHRU */ 2903 case I2O_SCSI_HBA_DSC_BUS_BUSY: 2904 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2905 ccb->ccb_h.status |= CAM_SCSI_BUSY; 2906 break; 2907 2908 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT: 2909 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2910 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2911 break; 2912 2913 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT: 2914 /* FALLTHRU */ 2915 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT: 2916 /* FALLTHRU */ 2917 case I2O_SCSI_HBA_DSC_LUN_INVALID: 2918 /* FALLTHRU */ 2919 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID: 2920 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2921 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 2922 break; 2923 2924 case I2O_SCSI_HBA_DSC_DATA_OVERRUN: 2925 /* FALLTHRU */ 2926 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR: 2927 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2928 ccb->ccb_h.status |= CAM_DATA_RUN_ERR; 2929 break; 2930 2931 default: 2932 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2933 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2934 break; 2935 } 2936 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) { 2937 ccb->csio.resid -= 2938 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount( 2939 Reply); 2940 } 2941 2942 /* Sense data in reply packet */ 2943 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) { 2944 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply); 2945 2946 if (size) { 2947 if (size > 
sizeof(ccb->csio.sense_data)) { 2948 size = sizeof(ccb->csio.sense_data); 2949 } 2950 if (size > I2O_SCSI_SENSE_DATA_SZ) { 2951 size = I2O_SCSI_SENSE_DATA_SZ; 2952 } 2953 if ((ccb->csio.sense_len) 2954 && (size > ccb->csio.sense_len)) { 2955 size = ccb->csio.sense_len; 2956 } 2957 bcopy(Reply->SenseData, 2958 &(ccb->csio.sense_data), size); 2959 } 2960 } 2961 2962 /* 2963 * Return Reply so that it can be used for the next command 2964 * since we have no more need for it now 2965 */ 2966 asr_set_FromFIFO(sc, ReplyOffset); 2967 2968 if (ccb->ccb_h.path) { 2969 xpt_done ((union ccb *)ccb); 2970 } else { 2971 wakeup (ccb); 2972 } 2973 } 2974 return (processed); 2975} /* asr_intr */ 2976 2977#undef QueueSize /* Grrrr */ 2978#undef SG_Size /* Grrrr */ 2979 2980/* 2981 * Meant to be included at the bottom of asr.c !!! 2982 */ 2983 2984/* 2985 * Included here as hard coded. Done because other necessary include 2986 * files utilize C++ comment structures which make them a nuisance to 2987 * included here just to pick up these three typedefs. 
2988 */ 2989typedef U32 DPT_TAG_T; 2990typedef U32 DPT_MSG_T; 2991typedef U32 DPT_RTN_T; 2992 2993#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */ 2994#include "dev/asr/osd_unix.h" 2995 2996#define asr_unit(dev) minor(dev) 2997 2998static u_int8_t ASR_ctlr_held; 2999 3000static int 3001asr_open(dev_t dev, int32_t flags, int32_t ifmt, struct thread *td) 3002{ 3003 int s; 3004 int error; 3005 3006 if (dev->si_drv1 == NULL) { 3007 return (ENODEV); 3008 } 3009 s = splcam (); 3010 if (ASR_ctlr_held) { 3011 error = EBUSY; 3012 } else if ((error = suser(td)) == 0) { 3013 ++ASR_ctlr_held; 3014 } 3015 splx(s); 3016 return (error); 3017} /* asr_open */ 3018 3019static int 3020asr_close(dev_t dev, int flags, int ifmt, struct thread *td) 3021{ 3022 3023 ASR_ctlr_held = 0; 3024 return (0); 3025} /* asr_close */ 3026 3027 3028/*-------------------------------------------------------------------------*/ 3029/* Function ASR_queue_i */ 3030/*-------------------------------------------------------------------------*/ 3031/* The Parameters Passed To This Function Are : */ 3032/* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 3033/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */ 3034/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */ 3035/* */ 3036/* This Function Will Take The User Request Packet And Convert It To An */ 3037/* I2O MSG And Send It Off To The Adapter. 
 */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/
/*
 * ASR_queue_i - queue a user-supplied I2O message frame (from the ioctl
 * path) to the adapter and copy the reply, and any scatter/gather data,
 * back out to user space.
 *
 * `Packet' is a USER-SPACE pointer to the caller's message frame; the
 * reply frame is expected to sit immediately after the message in the same
 * user buffer (see the computation of `Reply' below).
 *
 * Synchronous initialization functions (IOP_RESET, STATUS_GET,
 * OUTBOUND_INIT) are intercepted and executed inline rather than queued.
 */
static int
ASR_queue_i(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;	/* reused: first as the message function code, then as the
		 * splcam() cookie in the completion wait loop below */
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;
		I2O_FLAGS_COUNT FlagsCount;
		/* Variable-length kernel bounce buffer; the declared size is
		 * only a placeholder, the real length is malloc'ed below. */
		char KernelSpace[sizeof(long)];
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse user commands while the adapter is faulted (BlinkLed). */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/*
	 * Copy in the message into a local allocation.  Only the fixed
	 * header is fetched at first, enough to learn the function code
	 * and the full message size.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	/* Header no longer needed; the full frame is re-fetched later. */
	free(Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus(sc, &status) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-fetch the whole message frame now that its size is known. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	/* Peek at the user's reply frame header to learn its size. */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/*
	 * Allocate the kernel-side reply buffer, never smaller than a full
	 * SCSI error reply frame so the fields filled in below always fit.
	 */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	/* Mirror the request's contexts into the reply we will hand back. */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			/* Only simple-address SG elements are supported. */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/*
			 * Bounce buffer for this element; KernelSpace is the
			 * flexible tail of the ioctlSgList_S allocation.
			 */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			/* Remember the ORIGINAL flags/count and user address;
			 * sg->FlagsCount is rewritten below when split. */
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    /* More pieces follow, so this
					     * element cannot terminate the
					     * buffer or the list. */
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						| I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/* Grow the frame by one SG element and
					 * duplicate the current element into
					 * the new slot (second bcopy copies
					 * from sg-1 to open a gap at sg). */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					      M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind all bounce buffers on any SG failure. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			free(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free(elm, M_TEMP);
		}
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	/* NOTE(review): ASR_queue's return status is ignored here; the wait
	 * loop below relies solely on the ccb status — confirm intended. */
	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Detailed status is a simple success/failure bit for the caller. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		/* NOTE(review): the available sense space would normally be
		 * ReplySizeInBytes - (sizeof(frame) - I2O_SCSI_SENSE_DATA_SZ);
		 * the second `-' below makes `size' negative for small reply
		 * frames and it is not clamped at zero — verify the sign. */
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free(elm, M_TEMP);
	}
	if (error == 0) {
		/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	free(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/*			    Function asr_ioctl				*/
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :				*/
/*     dev  : Device number.						*/
/*     cmd  : Ioctl Command						*/
/*     data : User Argument Passed In.
*/ 3488/* flag : Mode Parameter */ 3489/* proc : Process Parameter */ 3490/* */ 3491/* This function is the user interface into this adapter driver */ 3492/* */ 3493/* Return : zero if OK, error code if not */ 3494/*----------------------------------------------------------------------*/ 3495 3496static int 3497asr_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) 3498{ 3499 Asr_softc_t *sc = dev->si_drv1; 3500 int i, error = 0; 3501 3502 if (sc != NULL) 3503 switch(cmd) { 3504 3505 case DPT_SIGNATURE: 3506 return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data), 3507 sizeof(dpt_sig_S))); 3508 3509 /* Traditional version of the ioctl interface */ 3510 case DPT_CTRLINFO & 0x0000FFFF: 3511 case DPT_CTRLINFO: { 3512 struct { 3513 u_int16_t length; 3514 u_int16_t drvrHBAnum; 3515 u_int32_t baseAddr; 3516 u_int16_t blinkState; 3517 u_int8_t pciBusNum; 3518 u_int8_t pciDeviceNum; 3519 u_int16_t hbaFlags; 3520 u_int16_t Interrupt; 3521 u_int32_t reserved1; 3522 u_int32_t reserved2; 3523 u_int32_t reserved3; 3524 } CtlrInfo; 3525 3526 bzero(&CtlrInfo, sizeof(CtlrInfo)); 3527 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t); 3528 CtlrInfo.drvrHBAnum = asr_unit(dev); 3529 CtlrInfo.baseAddr = sc->ha_Base; 3530 i = ASR_getBlinkLedCode (sc); 3531 if (i == -1) 3532 i = 0; 3533 3534 CtlrInfo.blinkState = i; 3535 CtlrInfo.pciBusNum = sc->ha_pciBusNum; 3536 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum; 3537#define FLG_OSD_PCI_VALID 0x0001 3538#define FLG_OSD_DMA 0x0002 3539#define FLG_OSD_I2O 0x0004 3540 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O; 3541 CtlrInfo.Interrupt = sc->ha_irq; 3542 error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo)); 3543 } return (error); 3544 3545 /* Traditional version of the ioctl interface */ 3546 case DPT_SYSINFO & 0x0000FFFF: 3547 case DPT_SYSINFO: { 3548 sysInfo_S Info; 3549 3550 bzero(&Info, sizeof(Info)); 3551 3552 Info.processorFamily = ASR_sig.dsProcessorFamily; 3553#if defined(__i386__) 
3554 switch (cpu) { 3555 case CPU_386SX: case CPU_386: 3556 Info.processorType = PROC_386; break; 3557 case CPU_486SX: case CPU_486: 3558 Info.processorType = PROC_486; break; 3559 case CPU_586: 3560 Info.processorType = PROC_PENTIUM; break; 3561 case CPU_686: 3562 Info.processorType = PROC_SEXIUM; break; 3563 } 3564#elif defined(__alpha__) 3565 Info.processorType = PROC_ALPHA; 3566#endif 3567 3568 Info.osType = OS_BSDI_UNIX; 3569 Info.osMajorVersion = osrelease[0] - '0'; 3570 Info.osMinorVersion = osrelease[2] - '0'; 3571 /* Info.osRevision = 0; */ 3572 /* Info.osSubRevision = 0; */ 3573 Info.busType = SI_PCI_BUS; 3574 Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM; 3575 3576 error = copyout(&Info, *(caddr_t *)data, sizeof(Info)); 3577 return (error); } 3578 3579 /* Get The BlinkLED State */ 3580 case DPT_BLINKLED: 3581 i = ASR_getBlinkLedCode (sc); 3582 if (i == -1) { 3583 i = 0; 3584 } 3585 error = copyout(&i, *(caddr_t *)data, sizeof(i)); 3586 break; 3587 3588 /* Send an I2O command */ 3589 case I2OUSRCMD: 3590 return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data))); 3591 3592 /* Reset and re-initialize the adapter */ 3593 case I2ORESETCMD: 3594 return (ASR_reset(sc)); 3595 3596 /* Rescan the LCT table and resynchronize the information */ 3597 case I2ORESCANCMD: 3598 return (ASR_rescan(sc)); 3599 } 3600 return (EINVAL); 3601} /* asr_ioctl */ 3602