/* asr.c revision 66190 */
1/* $FreeBSD: head/sys/dev/asr/asr.c 66190 2000-09-21 20:21:30Z msmith $ */ 2/* 3 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 4 * Copyright (c) 2000 Adaptec Corporation 5 * All rights reserved. 6 * 7 * TERMS AND CONDITIONS OF USE 8 * 9 * Redistribution and use in source form, with or without modification, are 10 * permitted provided that redistributions of source code must retain the 11 * above copyright notice, this list of conditions and the following disclaimer. 12 * 13 * This software is provided `as is' by Adaptec and any express or implied 14 * warranties, including, but not limited to, the implied warranties of 15 * merchantability and fitness for a particular purpose, are disclaimed. In no 16 * event shall Adaptec be liable for any direct, indirect, incidental, special, 17 * exemplary or consequential damages (including, but not limited to, 18 * procurement of substitute goods or services; loss of use, data, or profits; 19 * or business interruptions) however caused and on any theory of liability, 20 * whether in contract, strict liability, or tort (including negligence or 21 * otherwise) arising in any way out of the use of this driver software, even 22 * if advised of the possibility of such damage. 23 * 24 * SCSI I2O host adapter driver 25 * 26 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 27 * - The controller is not actually an ASR (Adaptec SCSI RAID) 28 * series that is visible, it's more of an internal code name. 29 * remove any visible references within reason for now. 30 * - bus_ptr->LUN was not correctly zeroed when initially 31 * allocated causing a possible panic of the operating system 32 * during boot. 33 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 34 * - Code always fails for ASR_getTid affecting performance. 
35 * - initiated a set of changes that resulted from a formal 36 * code inspection by Mark_Salyzyn@adaptec.com, 37 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 38 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 39 * Their findings were focussed on the LCT & TID handler, and 40 * all resulting changes were to improve code readability, 41 * consistency or have a positive effect on performance. 42 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 43 * - Passthrough returned an incorrect error. 44 * - Passthrough did not migrate the intrinsic scsi layer wakeup 45 * on command completion. 46 * - generate control device nodes using make_dev and delete_dev. 47 * - Performance affected by TID caching reallocing. 48 * - Made suggested changes by Justin_Gibbs@adaptec.com 49 * - use splcam instead of splbio. 50 * - use cam_imask instead of bio_imask. 51 * - use u_int8_t instead of u_char. 52 * - use u_int16_t instead of u_short. 53 * - use u_int32_t instead of u_long where appropriate. 54 * - use 64 bit context handler instead of 32 bit. 55 * - create_ccb should only allocate the worst case 56 * requirements for the driver since CAM may evolve 57 * making union ccb much larger than needed here. 58 * renamed create_ccb to asr_alloc_ccb. 59 * - go nutz justifying all debug prints as macros 60 * defined at the top and remove unsightly ifdefs. 61 * - INLINE STATIC viewed as confusing. Historically 62 * utilized to affect code performance and debug 63 * issues in OS, Compiler or OEM specific situations. 64 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 65 * - Ported from FreeBSD 2.2.X DPT I2O driver. 66 * changed struct scsi_xfer to union ccb/struct ccb_hdr 67 * changed variable name xs to ccb 68 * changed struct scsi_link to struct cam_path 69 * changed struct scsibus_data to struct cam_sim 70 * stopped using fordriver for holding on to the TID 71 * use proprietary packet creation instead of scsi_inquire 72 * CAM layer sends synchronize commands. 
73 */ 74 75#define ASR_VERSION 1 76#define ASR_REVISION '0' 77#define ASR_SUBREVISION '3' 78#define ASR_MONTH 7 79#define ASR_DAY 12 80#define ASR_YEAR 2000 - 1980 81 82/* 83 * Debug macros to resude the unsightly ifdefs 84 */ 85#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 86# define debug_asr_message(message) \ 87 { \ 88 u_int32_t * pointer = (u_int32_t *)message; \ 89 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\ 90 u_int32_t counter = 0; \ 91 \ 92 while (length--) { \ 93 printf ("%08lx%c", (u_long)*(pointer++), \ 94 (((++counter & 7) == 0) || (length == 0)) \ 95 ? '\n' \ 96 : ' '); \ 97 } \ 98 } 99#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 100 101#if (defined(DEBUG_ASR)) 102 /* Breaks on none STDC based compilers :-( */ 103# define debug_asr_printf(fmt,args...) printf(fmt, ##args) 104# define debug_asr_dump_message(message) debug_asr_message(message) 105# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 106 /* None fatal version of the ASSERT macro */ 107# if (defined(__STDC__)) 108# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__) 109# else 110# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__) 111# endif 112#else /* DEBUG_ASR */ 113# define debug_asr_printf(fmt,args...) 114# define debug_asr_dump_message(message) 115# define debug_asr_print_path(ccb) 116# define ASSERT(x) 117#endif /* DEBUG_ASR */ 118 119/* 120 * If DEBUG_ASR_CMD is defined: 121 * 0 - Display incoming SCSI commands 122 * 1 - add in a quick character before queueing. 123 * 2 - add in outgoing message frames. 124 */ 125#if (defined(DEBUG_ASR_CMD)) 126# define debug_asr_cmd_printf(fmt,args...) 
printf(fmt,##args) 127# define debug_asr_dump_ccb(ccb) \ 128 { \ 129 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \ 130 int len = ccb->csio.cdb_len; \ 131 \ 132 while (len) { \ 133 debug_asr_cmd_printf (" %02x", *(cp++)); \ 134 --len; \ 135 } \ 136 } 137# if (DEBUG_ASR_CMD > 0) 138# define debug_asr_cmd1_printf debug_asr_cmd_printf 139# else 140# define debug_asr_cmd1_printf(fmt,args...) 141# endif 142# if (DEBUG_ASR_CMD > 1) 143# define debug_asr_cmd2_printf debug_asr_cmd_printf 144# define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 145# else 146# define debug_asr_cmd2_printf(fmt,args...) 147# define debug_asr_cmd2_dump_message(message) 148# endif 149#else /* DEBUG_ASR_CMD */ 150# define debug_asr_cmd_printf(fmt,args...) 151# define debug_asr_cmd_dump_ccb(ccb) 152# define debug_asr_cmd1_printf(fmt,args...) 153# define debug_asr_cmd2_printf(fmt,args...) 154# define debug_asr_cmd2_dump_message(message) 155#endif /* DEBUG_ASR_CMD */ 156 157#if (defined(DEBUG_ASR_USR_CMD)) 158# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args) 159# define debug_usr_cmd_dump_message(message) debug_usr_message(message) 160#else /* DEBUG_ASR_USR_CMD */ 161# define debug_usr_cmd_printf(fmt,args...) 
162# define debug_usr_cmd_dump_message(message) 163#endif /* DEBUG_ASR_USR_CMD */ 164 165#define dsDescription_size 46 /* Snug as a bug in a rug */ 166#include "dev/asr/dptsig.h" 167 168static dpt_sig_S ASR_sig = { 169 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL, 170 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0, 171 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, 172 ADF_ALL_SC5, 173 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION, 174 ASR_MONTH, ASR_DAY, ASR_YEAR, 175/* 01234567890123456789012345678901234567890123456789 < 50 chars */ 176 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver" 177 /* ^^^^^ asr_attach alters these to match OS */ 178}; 179 180#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */ 181#include <sys/kernel.h> 182#include <sys/systm.h> 183#include <sys/malloc.h> 184#include <sys/proc.h> 185#include <sys/conf.h> 186#include <sys/disklabel.h> 187#include <sys/bus.h> 188#include <machine/resource.h> 189#include <machine/bus.h> 190#include <sys/rman.h> 191#include <sys/stat.h> 192 193#include <cam/cam.h> 194#include <cam/cam_ccb.h> 195#include <cam/cam_sim.h> 196#include <cam/cam_xpt_sim.h> 197#include <cam/cam_xpt_periph.h> 198 199#include <cam/scsi/scsi_all.h> 200#include <cam/scsi/scsi_message.h> 201 202#include <vm/vm.h> 203#include <vm/pmap.h> 204#include <machine/cputypes.h> 205#include <machine/clock.h> 206#include <i386/include/vmparam.h> 207 208#include <pci/pcivar.h> 209#include <pci/pcireg.h> 210 211#define STATIC static 212#define INLINE 213 214#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0)) 215# undef STATIC 216# define STATIC 217# undef INLINE 218# define INLINE 219#endif 220#define IN 221#define OUT 222#define INOUT 223 224#define osdSwap4(x) ((u_long)ntohl((u_long)(x))) 225#define KVTOPHYS(x) vtophys(x) 226#include "dev/asr/dptalign.h" 227#include "dev/asr/i2oexec.h" 228#include "dev/asr/i2obscsi.h" 229#include "dev/asr/i2odpt.h" 230#include "dev/asr/i2oadptr.h" 231#include "opt_asr.h" 232 
233#include "dev/asr/sys_info.h" 234 235/* Configuration Definitions */ 236 237#define SG_SIZE 58 /* Scatter Gather list Size */ 238#define MAX_TARGET_ID 126 /* Maximum Target ID supported */ 239#define MAX_LUN 255 /* Maximum LUN Supported */ 240#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */ 241#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */ 242#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */ 243#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */ 244#define MAX_MAP 4194304L /* Maximum mapping size of IOP */ 245 246/************************************************************************** 247** ASR Host Adapter structure - One Structure For Each Host Adapter That ** 248** Is Configured Into The System. The Structure Supplies Configuration ** 249** Information, Status Info, Queue Info And An Active CCB List Pointer. ** 250***************************************************************************/ 251 252/* I2O register set */ 253typedef struct { 254 U8 Address[0x30]; 255 volatile U32 Status; 256 volatile U32 Mask; 257# define Mask_InterruptsDisabled 0x08 258 U32 x[2]; 259 volatile U32 ToFIFO; /* In Bound FIFO */ 260 volatile U32 FromFIFO; /* Out Bound FIFO */ 261} i2oRegs_t; 262 263/* 264 * A MIX of performance and space considerations for TID lookups 265 */ 266typedef u_int16_t tid_t; 267 268typedef struct { 269 u_int32_t size; /* up to MAX_LUN */ 270 tid_t TID[1]; 271} lun2tid_t; 272 273typedef struct { 274 u_int32_t size; /* up to MAX_TARGET */ 275 lun2tid_t * LUN[1]; 276} target2lun_t; 277 278/* 279 * To ensure that we only allocate and use the worst case ccb here, lets 280 * make our own local ccb union. If asr_alloc_ccb is utilized for another 281 * ccb type, ensure that you add the additional structures into our local 282 * ccb union. To ensure strict type checking, we will utilize the local 283 * ccb definition wherever possible. 
284 */ 285union asr_ccb { 286 struct ccb_hdr ccb_h; /* For convenience */ 287 struct ccb_scsiio csio; 288 struct ccb_setasync csa; 289}; 290 291typedef struct Asr_softc { 292 u_int16_t ha_irq; 293 void * ha_Base; /* base port for each board */ 294 u_int8_t * volatile ha_blinkLED; 295 i2oRegs_t * ha_Virt; /* Base address of adapter */ 296 I2O_IOP_ENTRY ha_SystemTable; 297 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */ 298 struct cam_path * ha_path[MAX_CHANNEL+1]; 299 struct cam_sim * ha_sim[MAX_CHANNEL+1]; 300#if __FreeBSD_version >= 400000 301 struct resource * ha_mem_res; 302 struct resource * ha_irq_res; 303 void * ha_intr; 304#endif 305 u_int8_t ha_adapter_target[MAX_CHANNEL+1]; 306 PI2O_LCT ha_LCT; /* Complete list of devices */ 307# define le_type IdentityTag[0] 308# define I2O_BSA 0x20 309# define I2O_FCA 0x40 310# define I2O_SCSI 0x00 311# define I2O_PORT 0x80 312# define I2O_UNKNOWN 0x7F 313# define le_bus IdentityTag[1] 314# define le_target IdentityTag[2] 315# define le_lun IdentityTag[3] 316 target2lun_t * ha_targets[MAX_CHANNEL+1]; 317 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs; 318 u_long ha_Msgs_Phys; 319 u_int16_t ha_Msgs_Count; 320 321 /* Configuration information */ 322 /* The target id maximums we take */ 323 u_int8_t ha_MaxBus; /* Maximum bus */ 324 u_int8_t ha_MaxId; /* Maximum target ID */ 325 u_int8_t ha_MaxLun; /* Maximum target LUN */ 326 u_int8_t ha_SgSize; /* Max SG elements */ 327 u_int8_t ha_pciBusNum; 328 u_int8_t ha_pciDeviceNum; 329 u_int16_t ha_QueueSize; /* Max outstanding commands */ 330 331 /* Links into other parents and HBAs */ 332 struct Asr_softc * ha_next; /* HBA list */ 333 334#ifdef ASR_MEASURE_PERFORMANCE 335#define MAX_TIMEQ_SIZE 256 // assumes MAX 256 scsi commands sent 336 asr_perf_t ha_performance; 337 u_int32_t ha_submitted_ccbs_count; 338 339 // Queueing macros for a circular queue 340#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail)) 341#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % 
MAX_TIMEQ_SIZE) == (head)) 342#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \ 343 if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \ 344 if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \ 345 (head) = (tail) = 0; \ 346 } \ 347 else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \ 348 Q[(tail)] = (item); \ 349 } \ 350 else { \ 351 debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \ 352 } 353#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \ 354 if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \ 355 item = Q[(head)]; \ 356 if ((head) == (tail)) { (head) = (tail) = -1; } \ 357 else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \ 358 } \ 359 else { \ 360 (item) = -1; \ 361 debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \ 362 } 363 364 // Circular queue of time stamps 365 struct timeval ha_timeQ[MAX_TIMEQ_SIZE]; 366 u_int32_t ha_timeQFreeList[MAX_TIMEQ_SIZE]; 367 int ha_timeQFreeHead; 368 int ha_timeQFreeTail; 369#endif 370} Asr_softc_t; 371 372STATIC Asr_softc_t * Asr_softc; 373 374/* 375 * Prototypes of the routines we have in this object. 
376 */ 377 378/* Externally callable routines */ 379#if __FreeBSD_version >= 400000 380#define PROBE_ARGS IN device_t tag 381#define PROBE_RET int 382#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag) 383#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);} 384#define ATTACH_ARGS IN device_t tag 385#define ATTACH_RET int 386#define ATTACH_SET() int unit = device_get_unit(tag) 387#define ATTACH_RETURN(retval) return(retval) 388#else 389#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id 390#define PROBE_RET const char * 391#define PROBE_SET() 392#define PROBE_RETURN(retval) return(retval) 393#define ATTACH_ARGS IN pcici_t tag, IN int unit 394#define ATTACH_RET void 395#define ATTACH_SET() 396#define ATTACH_RETURN(retval) return 397#endif 398/* I2O HDM interface */ 399STATIC PROBE_RET asr_probe __P((PROBE_ARGS)); 400STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS)); 401/* DOMINO placeholder */ 402STATIC PROBE_RET domino_probe __P((PROBE_ARGS)); 403STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS)); 404/* MODE0 adapter placeholder */ 405STATIC PROBE_RET mode0_probe __P((PROBE_ARGS)); 406STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS)); 407 408STATIC Asr_softc_t * ASR_get_sc __P(( 409 IN dev_t dev)); 410STATIC int asr_ioctl __P(( 411 IN dev_t dev, 412 IN u_long cmd, 413 INOUT caddr_t data, 414 int flag, 415 struct proc * proc)); 416STATIC int asr_open __P(( 417 IN dev_t dev, 418 int32_t flags, 419 int32_t ifmt, 420 IN struct proc * proc)); 421STATIC int asr_close __P(( 422 dev_t dev, 423 int flags, 424 int ifmt, 425 struct proc * proc)); 426STATIC int asr_intr __P(( 427 IN Asr_softc_t * sc)); 428STATIC void asr_timeout __P(( 429 INOUT void * arg)); 430STATIC int ASR_init __P(( 431 IN Asr_softc_t * sc)); 432STATIC INLINE int ASR_acquireLct __P(( 433 INOUT Asr_softc_t * sc)); 434STATIC INLINE int ASR_acquireHrt __P(( 435 INOUT Asr_softc_t * sc)); 436STATIC void asr_action __P(( 437 IN struct cam_sim * 
sim, 438 IN union ccb * ccb)); 439STATIC void asr_async __P(( 440 void * callback_arg, 441 u_int32_t code, 442 struct cam_path * path, 443 void * arg)); 444STATIC void asr_poll __P(( 445 IN struct cam_sim * sim)); 446 447/* 448 * Here is the auto-probe structure used to nest our tests appropriately 449 * during the startup phase of the operating system. 450 */ 451#if __FreeBSD_version >= 400000 452STATIC device_method_t asr_methods[] = { 453 DEVMETHOD(device_probe, asr_probe), 454 DEVMETHOD(device_attach, asr_attach), 455 { 0, 0 } 456}; 457 458STATIC driver_t asr_driver = { 459 "asr", 460 asr_methods, 461 sizeof(Asr_softc_t) 462}; 463 464STATIC devclass_t asr_devclass; 465 466DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0); 467 468STATIC device_method_t domino_methods[] = { 469 DEVMETHOD(device_probe, domino_probe), 470 DEVMETHOD(device_attach, domino_attach), 471 { 0, 0 } 472}; 473 474STATIC driver_t domino_driver = { 475 "domino", 476 domino_methods, 477 0 478}; 479 480STATIC devclass_t domino_devclass; 481 482DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0); 483 484STATIC device_method_t mode0_methods[] = { 485 DEVMETHOD(device_probe, mode0_probe), 486 DEVMETHOD(device_attach, mode0_attach), 487 { 0, 0 } 488}; 489 490STATIC driver_t mode0_driver = { 491 "mode0", 492 mode0_methods, 493 0 494}; 495 496STATIC devclass_t mode0_devclass; 497 498DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0); 499#else 500STATIC u_long asr_pcicount = 0; 501STATIC struct pci_device asr_pcidev = { 502 "asr", 503 asr_probe, 504 asr_attach, 505 &asr_pcicount, 506 NULL 507}; 508DATA_SET (asr_pciset, asr_pcidev); 509 510STATIC u_long domino_pcicount = 0; 511STATIC struct pci_device domino_pcidev = { 512 "domino", 513 domino_probe, 514 domino_attach, 515 &domino_pcicount, 516 NULL 517}; 518DATA_SET (domino_pciset, domino_pcidev); 519 520STATIC u_long mode0_pcicount = 0; 521STATIC struct pci_device mode0_pcidev = { 522 "mode0", 523 mode0_probe, 524 
mode0_attach, 525 &mode0_pcicount, 526 NULL 527}; 528DATA_SET (mode0_pciset, mode0_pcidev); 529#endif 530 531/* 532 * devsw for asr hba driver 533 * 534 * only ioctl is used. the sd driver provides all other access. 535 */ 536#define CDEV_MAJOR 154 /* prefered default character major */ 537STATIC struct cdevsw asr_cdevsw = { 538 asr_open, /* open */ 539 asr_close, /* close */ 540 noread, /* read */ 541 nowrite, /* write */ 542 asr_ioctl, /* ioctl */ 543 nopoll, /* poll */ 544 nommap, /* mmap */ 545 nostrategy, /* strategy */ 546 "asr", /* name */ 547 CDEV_MAJOR, /* maj */ 548 nodump, /* dump */ 549 nopsize, /* psize */ 550 0, /* flags */ 551 -1 /* bmaj */ 552}; 553 554#ifdef ASR_MEASURE_PERFORMANCE 555STATIC u_int32_t asr_time_delta __P((IN struct timeval start, 556 IN struct timeval end)); 557#endif 558 559/* 560 * Initialize the dynamic cdevsw hooks. 561 */ 562STATIC void 563asr_drvinit ( 564 void * unused) 565{ 566 static int asr_devsw_installed = 0; 567 568 if (asr_devsw_installed) { 569 return; 570 } 571 asr_devsw_installed++; 572 /* 573 * Find a free spot (the report during driver load used by 574 * osd layer in engine to generate the controlling nodes). 575 */ 576 while ((asr_cdevsw.d_maj < NUMCDEVSW) 577 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL)) { 578 ++asr_cdevsw.d_maj; 579 } 580 if (asr_cdevsw.d_maj >= NUMCDEVSW) for ( 581 asr_cdevsw.d_maj = 0; 582 (asr_cdevsw.d_maj < CDEV_MAJOR) 583 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL); 584 ++asr_cdevsw.d_maj); 585 /* 586 * Come to papa 587 */ 588 cdevsw_add(&asr_cdevsw); 589 /* 590 * delete any nodes that would attach to the primary adapter, 591 * let the adapter scans add them. 
 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */

/* Must initialize before CAM layer picks up our HBA driver */
SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)

/* I2O support routines */
/*
 * defAlignLong reserves raw stack storage big enough for STRUCT;
 * getAlignLong casts that storage back to a typed pointer.
 */
#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))

/*
 * Fill message with default.
 *
 * Zeroes `size' bytes of the caller-supplied buffer and presets the
 * common I2O frame header fields: version/offset, message size (in
 * 32-bit words, rounded up) and initiator address. Returns the buffer
 * cast to a message frame pointer for further field initialization.
 */
STATIC PI2O_MESSAGE_FRAME
ASR_fillMessage (
	IN char * Message,
	IN u_int16_t size)
{
	OUT PI2O_MESSAGE_FRAME Message_Ptr;

	Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
	bzero ((void *)Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	/* Frame size is expressed in U32 units, rounded up */
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	return (Message_Ptr);
} /* ASR_fillMessage */

#define EMPTY_QUEUE ((U32)-1L)

/*
 * Pop the next free inbound-frame offset from the adapter's ToFIFO
 * register. If the first read returns EMPTY_QUEUE the register is read
 * a second time (NOTE(review): presumably the FIFO can transiently read
 * empty — confirm against the I2O hardware documentation). Returns
 * EMPTY_QUEUE if both reads come up empty.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32 MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */

/* Issue a polled command */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 Mask = -1L;	/* -1 means "no frame could be posted" */
	U32 MessageOffset;
	u_int Delay = 1500;	/* up to 1500 * 10ms = 15s for a free frame */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into adapter memory at the granted offset */
		bcopy (Message, virt->Address + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		/* Writing the offset back to ToFIFO starts the command */
		virt->ToFIFO = MessageOffset;
	}
	/*
	 * Returns the previous interrupt mask (caller must restore it),
	 * or (U32)-1L if no inbound frame became available.
	 */
	return (Mask);
} /* ASR_initiateCp */

/*
 * Reset the adapter.
 *
 * Posts an EXEC_IOP_RESET and polls (up to ~2s) for the adapter to
 * write a non-zero status word into the reply area appended to the
 * message. Returns that status word, or 0 if the message could not be
 * posted. Interrupts are left as ASR_initiateCp found them.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt)
{
	/* Reset message plus a trailing U32 used as the reply status word */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32 R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by the adapter */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* The adapter DMAs its status to this physical address */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */

/*
 * Get the current state of the adapter.
 *
 * Posts an EXEC_STATUS_GET whose reply is DMAed into `buffer' and polls
 * (up to ~50ms) for the SyncByte to become non-zero. Returns `buffer'
 * on success, NULL on poll timeout or if the message could not be
 * posted.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 */
		u_int8_t Delay = 50;

		/* SyncByte is DMAed by the adapter; force a volatile read */
		while (*((U8 * volatile)&buffer->SyncByte) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller. If we find it, we will use it.
 * virtual adapters.
 */
/*
 * PCI probe: match on the (device<<16)|vendor id set up by PROBE_SET();
 * PROBE_RETURN with a description claims the device, NULL declines it.
 */
STATIC PROBE_RET
asr_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0xA5011044) {		/* DPT/Adaptec vendor 0x1044 */
		PROBE_RETURN ("Adaptec Caching SCSI RAID");
	}
	PROBE_RETURN (NULL);
} /* asr_probe */

/*
 * Probe/Attach for DOMINO chipset.
 */
STATIC PROBE_RET
domino_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0x10121044) {
		PROBE_RETURN ("Adaptec Caching Memory Controller");
	}
	PROBE_RETURN (NULL);
} /* domino_probe */

/* Placeholder attach: claim the function so nothing else binds to it */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */

/*
 * Probe/Attach for MODE0 adapters.
 */
STATIC PROBE_RET
mode0_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0x908010B5) {		/* PLX bridge on Mode0 3xxx */
		PROBE_RETURN ("Adaptec Mode0 3xxx");
	}
#if 0 /* this would match any generic i960 -- mjs */
	if (id == 0x19608086) {
		PROBE_RETURN ("Adaptec Mode0 1xxx");
	}
#endif
	PROBE_RETURN (NULL);
} /* mode0_probe */

/* Placeholder attach: claim the function so nothing else binds to it */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */

/*
 * Allocate and zero a driver-private ccb, pre-linked to the softc via
 * spriv_ptr0. Returns NULL on allocation failure (NOTE(review): with
 * M_WAITOK malloc normally sleeps rather than fail, so the NULL check
 * is defensive).
 */
STATIC INLINE union asr_ccb *
asr_alloc_ccb (
	IN Asr_softc_t * sc)
{
	OUT union asr_ccb * new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
		bzero (new_ccb, sizeof(*new_ccb));
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */

/* Release a ccb obtained from asr_alloc_ccb */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 * Print inquiry data `carefully': emit at most `len' characters,
 * stopping early at NUL, space or '-'.
 */
STATIC void
ASR_prstring (
	u_int8_t * s,
	int len)
{
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		printf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Prototypes
 */
STATIC INLINE int ASR_queue __P((
	IN Asr_softc_t
* sc,
	IN PI2O_MESSAGE_FRAME Message));
/*
 * Send a message synchronously and without Interrupt to a ccb.
 *
 * Queues the frame with the ccb as its 64-bit initiator context, then
 * busy-polls asr_intr until the ccb leaves CAM_REQ_INPROG. Adapter
 * interrupts are masked for the duration and restored on exit.
 * Returns the final ccb status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	int s;
	U32 Mask;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		/* Could not post: mark the ccb for requeue by CAM */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while (ccb->ccb_h.status == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 * Send a message synchronously to a Asr_softc_t.
 *
 * Convenience wrapper: allocates a throwaway ccb, runs ASR_queue_s and
 * frees the ccb. Returns the ccb status, or CAM_REQUEUE_REQ if no ccb
 * could be allocated.
 */
STATIC int
ASR_queue_c (
	IN Asr_softc_t * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	union asr_ccb * ccb;
	OUT int status;

	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 * Add the specified ccb to the active queue and arm its timeout
 * (unless the caller asked for CAM_TIME_INFINITY).
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes
switching from write back to write thru. 959 */ 960 ccb->ccb_h.timeout = 6 * 60 * 1000; 961 } 962 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb, 963 (ccb->ccb_h.timeout * hz) / 1000); 964 } 965 splx(s); 966} /* ASR_ccbAdd */ 967 968/* 969 * Remove the specified ccb from the active queue. 970 */ 971STATIC INLINE void 972ASR_ccbRemove ( 973 IN Asr_softc_t * sc, 974 INOUT union asr_ccb * ccb) 975{ 976 int s; 977 978 s = splcam(); 979 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); 980 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 981 splx(s); 982} /* ASR_ccbRemove */ 983 984/* 985 * Fail all the active commands, so they get re-issued by the operating 986 * system. 987 */ 988STATIC INLINE void 989ASR_failActiveCommands ( 990 IN Asr_softc_t * sc) 991{ 992 struct ccb_hdr * ccb; 993 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message); 994 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr; 995 int s; 996 997 /* Send a blind LCT command to wait for the enableSys to complete */ 998 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message, 999 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT)); 1000 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 1001 I2O_EXEC_LCT_NOTIFY); 1002 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr, 1003 I2O_CLASS_MATCH_ANYCLASS); 1004 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1005 1006 s = splcam(); 1007 LIST_FOREACH(ccb, &(sc->ha_ccb), sim_links.le) { 1008 1009 ASR_ccbRemove (sc, (union asr_ccb *)ccb); 1010 1011 ccb->status &= ~CAM_STATUS_MASK; 1012 ccb->status |= CAM_REQUEUE_REQ; 1013 ((struct ccb_scsiio *)ccb)->resid 1014 = ((struct ccb_scsiio *)ccb)->dxfer_len; 1015 1016 if (ccb->path) { 1017 xpt_done ((union ccb *)ccb); 1018 } else { 1019 wakeup ((caddr_t)ccb); 1020 } 1021 } 1022 splx(s); 1023} /* ASR_failActiveCommands */ 1024 1025/* 1026 * The following command causes the HBA to reset the specific bus 1027 */ 1028STATIC INLINE void 1029ASR_resetBus( 1030 IN Asr_softc_t * 
sc,
	IN int bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/*
	 * Scan the LCT for the port entry of the requested bus; the reset
	 * is addressed to that entry's TID.
	 */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 * Return the adapter's blink-LED fault code, or 0 if the adapter is not
 * faulted (signature byte 0xBC at ha_blinkLED[1] marks a valid code).
 */
STATIC INLINE int
ASR_getBlinkLedCode (
	IN Asr_softc_t * sc)
{
	if ((sc != (Asr_softc_t *)NULL)
	 && (sc->ha_blinkLED != (u_int8_t *)NULL)
	 && (sc->ha_blinkLED[1] == 0xBC)) {
		return (sc->ha_blinkLED[0]);
	}
	return (0);
} /* ASR_getBlinkLedCode */

/*
 * Determine the address of an TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun,
	IN int new_entry)
{
	target2lun_t * bus_ptr;
	lun2tid_t * target_ptr;
	unsigned new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* size is kept as capacity+... the embedded first element,
		 * so growth below tests `size <= new_size' */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* old capacity is (size - 1) entries beyond the first */
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		/* lun != 0: round up to a TARGET_CHUNK boundary */
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 * Get a pre-existing TID relationship.
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_getTid (
	IN Asr_softc_t * sc,
	IN int		 bus,
	IN int		 target,
	IN int		 lun)
{
	tid_t	* tid_ptr;
	int	  s;
	OUT tid_t retval;

	/* splcam: the index tables may be grown by other contexts */
	s = splcam();
	if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
	  == (tid_t *)NULL)
	/* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		splx(s);
		return ((tid_t)-1);
	}
	retval = *tid_ptr;
	splx(s);
	return (retval);
} /* ASR_getTid */

/*
 * Set a TID relationship.
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_setTid (
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	INOUT tid_t	    TID)
{
	tid_t	* tid_ptr;
	int	  s;

	if (TID != (tid_t)-1) {
		/* (tid_t)0 is the "never set" sentinel; refuse to store it */
		if (TID == 0) {
			return ((tid_t)-1);
		}
		s = splcam();
		/* TRUE: grow the index tables if this entry is new */
		if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
		  == (tid_t *)NULL) {
			splx(s);
			return ((tid_t)-1);
		}
		*tid_ptr = TID;
		splx(s);
	}
	return (TID);
} /* ASR_setTid */

/*-------------------------------------------------------------------------*/
/*			    Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int	bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups (counts bus down to 0) */
	do {
		int target;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t	       TID = (tid_t)-1;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 * ASR_getTid (sc, bus, target, lun) != TI
				 */
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */

/*-------------------------------------------------------------------------*/
/*			    Function ASR_reset				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will reset the adapter and resynchronize any data	   */
/*									   */
/* Return : None							   */
/*-------------------------------------------------------------------------*/

STATIC INLINE void
ASR_reset(
	IN Asr_softc_t * sc)
{
	/* Full recovery: IOP reset, re-init, rescan, then requeue all I/O */
	(void)ASR_resetIOP (sc->ha_Virt);
	(void)ASR_init (sc);
	(void)ASR_rescan (sc);
	(void)ASR_failActiveCommands (sc);
} /* ASR_reset */

/*
 * Device timeout handler.
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb * ccb = (union asr_ccb *)arg;
	Asr_softc_t   * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 * (`s' is reused here to hold the blink code before it becomes
	 * the saved spl level below.)
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		debug_asr_printf (
		  " due to adapter blinkled code %x\nresetting adapter\n", s);
		ASR_reset (sc);
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Second timeout of the same command: escalate to adapter reset */
	if (ccb->ccb_h.status == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		ASR_reset (sc);
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	splx(s);
} /* asr_timeout */

/*
 * send a message asynchronously
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t	     * sc,
	IN PI2O_MESSAGE_FRAME  Message)
{
	OUT U32		MessageOffset;
	union asr_ccb * ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/*
	 * Limit the number of Messages sent to this HBA. Better to sleep,
	 * than to hardware loop like a nut! By limiting the number of
	 * messages to an individual HBA here, we manage to perform all
	 * the processing of the message ready to drop the next one into
	 * the controller. We could limit the messages we are allowed to
	 * take, but that may have a performance hit.
	 */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	/* Acquire an inbound frame; a second attempt is made if the
	 * first read comes back empty */
	if (((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE)
	 || ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE)) {
#ifdef ASR_MEASURE_PERFORMANCE
		int	startTimeIndex;

		if (ccb) {
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&sc->ha_timeQ[startTimeIndex]);
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Copy the frame into controller memory (size is in U32s) */
		bcopy (Message, sc->ha_Virt->Address + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		/* No inbound frame available: check for a wedged adapter */
		if (ASR_getBlinkLedCode(sc)) {
			ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */


/* Simple Scatter Gather elements */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer ==
NULL) ? NULL : KVTOPHYS(Buffer))

/*
 * Retrieve Parameter Group.
 * Buffer must be allocated using defAlignLong macro.
 * Returns a pointer to the parameter data inside the caller's Buffer,
 * or NULL if the UtilParamsGet request failed or returned no results.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t	 TID,
	IN int		 Group,
	OUT void       * Buffer,
	IN unsigned	 BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* pad so the two SG elements fit after the frame */
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			    O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations	    * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char				Info[1];
	}			    * Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	/* Request every field of the single specified parameter group */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */

/*
 * Acquire the LCT information.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	/* Reject a table that shrank below the header or is absurdly big */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				  | I2O_SGL_FLAGS_LAST_ELEMENT
				  | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame and rebase `sg' into the copy */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			  malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each LCT entry and fill in its bus/target/lun */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT	Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen, bounded */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */

/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb      * ccb,
	OUT PI2O_MESSAGE_FRAME	Message)
{
	int			next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME	Message_Ptr;
	Asr_softc_t	      * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t			v;
	vm_size_t		size, len;
	U32			MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		/* Cache miss: fall back to a linear LCT search */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		if (TID == (tid_t)0) {
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	sizeof(I2O_SG_ELEMENT))
	  / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* rw != 0 means host-to-device (CAM_DIR_IN clear) */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
	       : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 * Reset the adapter.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	OUT U32			      * volatile Reply_Ptr;
	U32				Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * Busy-waits on the volatile reply word the adapter
		 * writes back; no timeout here.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
2058 */ 2059 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 2060 2061 /* Allocate the reply frames */ 2062 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 2063 * sc->ha_Msgs_Count; 2064 2065 /* 2066 * contigmalloc only works reliably at 2067 * initialization time. 2068 */ 2069 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 2070 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul, 2071 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) 2072 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 2073 (void)bzero ((char *)sc->ha_Msgs, size); 2074 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs); 2075 } 2076 } 2077 2078 /* Initialize the outbound FIFO */ 2079 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) 2080 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys; 2081 size; --size) { 2082 sc->ha_Virt->FromFIFO = addr; 2083 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME); 2084 } 2085 return (*Reply_Ptr); 2086 } 2087 return (0); 2088} /* ASR_initOutBound */ 2089 2090/* 2091 * Set the system table 2092 */ 2093STATIC INLINE int 2094ASR_setSysTab( 2095 IN Asr_softc_t * sc) 2096{ 2097 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr; 2098 PI2O_SET_SYSTAB_HEADER SystemTable; 2099 Asr_softc_t * ha; 2100 PI2O_SGE_SIMPLE_ELEMENT sg; 2101 int retVal; 2102 2103 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc ( 2104 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK)) 2105 == (PI2O_SET_SYSTAB_HEADER)NULL) { 2106 return (ENOMEM); 2107 } 2108 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER)); 2109 for (ha = Asr_softc; ha; ha = ha->ha_next) { 2110 ++SystemTable->NumberEntries; 2111 } 2112 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc ( 2113 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT) 2114 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)), 2115 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) { 2116 free (SystemTable, M_TEMP); 2117 return (ENOMEM); 2118 } 2119 (void)ASR_fillMessage((char *)Message_Ptr, 2120 
  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	/* VersionOffset: I2O version in low nibbles, SGL offset (in U32s,
	 * stored <<4) in the high nibble of the offset field. */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 *	to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 *	is greater by using a shift operation rather than a divide by
	 *	sizeof(u_int32_t).
	 */
	/* Recover the SGL start from the VersionOffset field just set:
	 * (offset-in-U32s << 4) & 0xF0, >> 2 converts back to bytes. */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
		&(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	/* First SG element: the system table header itself */
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One SG element per controller's system table entry; the last one
	 * is flagged END_OF_BUFFER. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length terminators (memory table and private table),
	 * the final one carrying LAST_ELEMENT. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */

/*
 *	Acquire the Hardware Resource Table (HRT) from the IOP and use its
 *	AdapterID fields to annotate the previously acquired LCT entries
 *	with their bus number, tracking the highest bus seen in ha_MaxBus.
 *	Returns 0 on success, ENODEV if the HRT query fails.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;	/* stack buffer large enough for header + MAX_CHANNEL entries */
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *	Set up the buffers as scatter gather elements.
 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the size of our stack buffer */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	/* Match each HRT entry's TID (low 12 bits of AdapterID) against the
	 * LCT entries and record the bus number (high 16 bits) there. */
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */

/*
 *	Enable the adapter (EXEC_SYS_ENABLE).
 *	Returns 0 on success (queue status of zero), non-zero otherwise.
 *	NOTE(review): callers compare this against CAM_REQ_CMP; that works
 *	only because the boolean result happens to line up — confirm.
 */
STATIC INLINE int
ASR_enableSys (
	IN Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
	PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;

	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_ENABLE);
	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
} /* ASR_enableSys */

/*
 *	Perform the stages necessary to initialize the adapter:
 *	outbound FIFO setup, system table download, then system enable.
 *	Returns non-zero (true) when any stage fails, 0 on full success.
 */
STATIC int
ASR_init(
	IN Asr_softc_t * sc)
{
	return ((ASR_initOutBound(sc) == 0)
	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */

/*
 *	Send a Synchronize Cache command to the target device.
 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int		 bus,
	IN int		 target,
	IN int		 lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 *
	 * NOTE(review): the guard below proceeds only when ha_ccb.lh_first
	 * is NON-NULL, which reads as the opposite of the comment above —
	 * confirm intended polarity against the shutdown path.
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (sc->ha_ccb.lh_first != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

		/* Build a private SCB_EXEC message on the (aligned) stack */
		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
		  / sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB; SCSI-2 puts the LUN in the
		 * top three bits of byte 1. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/* NOTE(review): SCBFlags is set a second time here, adding
		 * XFER_FROM_DEVICE — the earlier set above is redundant. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}

/*
 *	Issue a Synchronize Cache to every possible bus/target/lun
 *	combination known to this controller.
 */
STATIC INLINE void
ASR_synchronize (
	IN Asr_softc_t * sc)
{
	int bus, target, lun;

	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				ASR_sync(sc,bus,target,lun);
			}
		}
	}
}

/*
 *	Reset the HBA, targets and BUS.
 *		Currently this resets *all* the SCSI busses.
 *	Caches are flushed first so no dirty data is lost by the reset.
 */
STATIC INLINE void
asr_hbareset(
	IN Asr_softc_t * sc)
{
	ASR_synchronize (sc);
	ASR_reset (sc);
} /* asr_hbareset */

/*
 *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
 *	limit and a reduction in error checking (in the pre 4.0 case).
 *	Returns 1 on success, 0 on failure; fills in sc->ha_Base (bus
 *	address) and sc->ha_Virt (KVA of the register window).
 */
STATIC int
asr_pci_map_mem (
#if __FreeBSD_version >= 400000
	IN device_t	tag,
#else
	IN pcici_t	tag,
#endif
	IN Asr_softc_t * sc)
{
	int		rid;
	u_int32_t	p, l;

#if __FreeBSD_version >= 400000
	/*
	 *	I2O specification says we must find first *memory* mapped BAR
	 *	(bit 0 clear means memory space).
	 *	NOTE(review): ++rid steps one byte at a time rather than one
	 *	BAR (4 bytes); harmless when BAR0 is memory — confirm.
	 */
	for (rid = PCIR_MAPS;
	  rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
	  ++rid) {
		p = pci_read_config(tag, rid, sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 *	Give up?
 */
	if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
		rid = PCIR_MAPS;	/* fall back to BAR0 */
	}
	/* Standard PCI BAR sizing: save, write all-ones, read back size
	 * mask, restore.  l becomes the decoded size (two's complement of
	 * the masked read-back). */
	p = pci_read_config(tag, rid, sizeof(p));
	pci_write_config(tag, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
	pci_write_config(tag, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;	/* do not map more than the driver uses */
	}
	p &= ~15;	/* strip BAR type bits to get the base address */
	sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == (struct resource *)NULL) {
		return (0);
	}
	sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
	if (sc->ha_Base == (void *)NULL) {
		return (0);
	}
	sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
#else
	vm_size_t psize, poffs;

	/*
	 *	I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = PCI_MAP_REG_START;
	  rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
	  ++rid) {
		p = pci_conf_read (tag, rid);
		if ((p & 1) == 0) {
			break;
		}
	}
	if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
		rid = PCI_MAP_REG_START;
	}
	/*
	 **	save old mapping, get size and type of memory
	 **
	 **	type is in the lowest four bits.
	 **	If device requires 2^n bytes, the next
	 **	n-4 bits are read as 0.
	 */

	sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
	  & PCI_MAP_MEMORY_ADDRESS_MASK);
	pci_conf_write (tag, rid, 0xfffffffful);
	l = pci_conf_read (tag, rid);
	pci_conf_write (tag, rid, p);

	/*
	 **	check the type: accept 32-bit mappings, or below-1MB
	 **	mappings only when the base actually fits below 1MB.
	 */

	if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
	   && ((u_long)sc->ha_Base & ~0xfffff) == 0)
	  && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
		debug_asr_printf (
		  "asr_pci_map_mem failed: bad memory type=0x%x\n",
		  (unsigned) l);
		return (0);
	};

	/*
	 **	get the size.
 */

	psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
	if (psize > MAX_MAP) {
		psize = MAX_MAP;
	}

	/* An all-zeroes or all-ones base means the BIOS never assigned one */
	if ((sc->ha_Base == (void *)NULL)
	 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
		debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
		return (0);
	};

	/*
	 **	Truncate sc->ha_Base to page boundary.
	 **	(Or does pmap_mapdev the job?)
	 */

	poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
	sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
	  psize + poffs);

	if (sc->ha_Virt == (i2oRegs_t *)NULL) {
		return (0);
	}

	/* Re-apply the sub-page offset inside the mapped window */
	sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
#endif
	return (1);
} /* asr_pci_map_mem */

/*
 *	A simplified copy of the real pci_map_int with additional
 *	registration requirements.
 *	Returns 1 on success, 0 on failure; records the IRQ line in
 *	sc->ha_irq and hooks asr_intr as the handler.
 */
STATIC int
asr_pci_map_int (
#if __FreeBSD_version >= 400000
	IN device_t	tag,
#else
	IN pcici_t	tag,
#endif
	IN Asr_softc_t * sc)
{
#if __FreeBSD_version >= 400000
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
	  0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == (struct resource *)NULL) {
		return (0);
	}
	if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
#else
	if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
	  (void *)sc, &cam_imask)) {
		return (0);
	}
	sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
#endif
	return (1);
} /* asr_pci_map_int */

/*
 *	Attach the devices, and virtual devices to the driver list.
2512 */ 2513STATIC ATTACH_RET 2514asr_attach (ATTACH_ARGS) 2515{ 2516 Asr_softc_t * sc; 2517 struct scsi_inquiry_data * iq; 2518 ATTACH_SET(); 2519 2520 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) { 2521 ATTACH_RETURN(ENOMEM); 2522 } 2523 if (Asr_softc == (Asr_softc_t *)NULL) { 2524 /* 2525 * Fixup the OS revision as saved in the dptsig for the 2526 * engine (dptioctl.h) to pick up. 2527 */ 2528 bcopy (osrelease, &ASR_sig.dsDescription[16], 5); 2529 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj); 2530 } 2531 /* 2532 * Initialize the software structure 2533 */ 2534 bzero (sc, sizeof(*sc)); 2535 LIST_INIT(&sc->ha_ccb); 2536# ifdef ASR_MEASURE_PERFORMANCE 2537 { 2538 u_int32_t i; 2539 2540 // initialize free list for timeQ 2541 sc->ha_timeQFreeHead = 0; 2542 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1; 2543 for (i = 0; i < MAX_TIMEQ_SIZE; i++) { 2544 sc->ha_timeQFreeList[i] = i; 2545 } 2546 } 2547# endif 2548 /* Link us into the HA list */ 2549 { 2550 Asr_softc_t **ha; 2551 2552 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2553 *(ha) = sc; 2554 } 2555 { 2556 PI2O_EXEC_STATUS_GET_REPLY status; 2557 int size; 2558 2559 /* 2560 * This is the real McCoy! 
2561 */ 2562 if (!asr_pci_map_mem(tag, sc)) { 2563 printf ("asr%d: could not map memory\n", unit); 2564 ATTACH_RETURN(ENXIO); 2565 } 2566 /* Enable if not formerly enabled */ 2567#if __FreeBSD_version >= 400000 2568 pci_write_config (tag, PCIR_COMMAND, 2569 pci_read_config (tag, PCIR_COMMAND, sizeof(char)) 2570 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2571 /* Knowledge is power, responsibility is direct */ 2572 { 2573 struct pci_devinfo { 2574 STAILQ_ENTRY(pci_devinfo) pci_links; 2575 struct resource_list resources; 2576 pcicfgregs cfg; 2577 } * dinfo = device_get_ivars(tag); 2578 sc->ha_pciBusNum = dinfo->cfg.bus; 2579 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) 2580 | dinfo->cfg.func; 2581 } 2582#else 2583 pci_conf_write (tag, PCIR_COMMAND, 2584 pci_conf_read (tag, PCIR_COMMAND) 2585 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 2586 /* Knowledge is power, responsibility is direct */ 2587 switch (pci_mechanism) { 2588 2589 case 1: 2590 sc->ha_pciBusNum = tag.cfg1 >> 16; 2591 sc->ha_pciDeviceNum = tag.cfg1 >> 8; 2592 2593 case 2: 2594 sc->ha_pciBusNum = tag.cfg2.forward; 2595 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7) 2596 | (tag.cfg2.port >> 5); 2597 } 2598#endif 2599 /* Check if the device is there? 
*/ 2600 if ((ASR_resetIOP(sc->ha_Virt) == 0) 2601 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc ( 2602 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) 2603 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) 2604 || (ASR_getStatus(sc->ha_Virt, status) == NULL)) { 2605 printf ("asr%d: could not initialize hardware\n", unit); 2606 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */ 2607 } 2608 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2609 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2610 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2611 sc->ha_SystemTable.IopState = status->IopState; 2612 sc->ha_SystemTable.MessengerType = status->MessengerType; 2613 sc->ha_SystemTable.InboundMessageFrameSize 2614 = status->InboundMFrameSize; 2615 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow 2616 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO)); 2617 2618 if (!asr_pci_map_int(tag, (void *)sc)) { 2619 printf ("asr%d: could not map interrupt\n", unit); 2620 ATTACH_RETURN(ENXIO); 2621 } 2622 2623 /* Adjust the maximim inbound count */ 2624 if (((sc->ha_QueueSize 2625 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) 2626 > MAX_INBOUND) 2627 || (sc->ha_QueueSize == 0)) { 2628 sc->ha_QueueSize = MAX_INBOUND; 2629 } 2630 2631 /* Adjust the maximum outbound count */ 2632 if (((sc->ha_Msgs_Count 2633 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) 2634 > MAX_OUTBOUND) 2635 || (sc->ha_Msgs_Count == 0)) { 2636 sc->ha_Msgs_Count = MAX_OUTBOUND; 2637 } 2638 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2639 sc->ha_Msgs_Count = sc->ha_QueueSize; 2640 } 2641 2642 /* Adjust the maximum SG size to adapter */ 2643 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize( 2644 status) << 2)) > MAX_INBOUND_SIZE) { 2645 size = MAX_INBOUND_SIZE; 2646 } 2647 free (status, M_TEMP); 2648 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2649 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT); 2650 } 2651 2652 /* 
2653 * Only do a bus/HBA reset on the first time through. On this 2654 * first time through, we do not send a flush to the devices. 2655 */ 2656 if (ASR_init(sc) == 0) { 2657 struct BufferInfo { 2658 I2O_PARAM_RESULTS_LIST_HEADER Header; 2659 I2O_PARAM_READ_OPERATION_RESULT Read; 2660 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2661 }; 2662 defAlignLong (struct BufferInfo, Buffer); 2663 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2664# define FW_DEBUG_BLED_OFFSET 8 2665 2666 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2667 ASR_getParams(sc, 0, 2668 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2669 Buffer, sizeof(struct BufferInfo))) 2670 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) { 2671 sc->ha_blinkLED = sc->ha_Virt->Address 2672 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info) 2673 + FW_DEBUG_BLED_OFFSET; 2674 } 2675 if (ASR_acquireLct(sc) == 0) { 2676 (void)ASR_acquireHrt(sc); 2677 } 2678 } else { 2679 printf ("asr%d: failed to initialize\n", unit); 2680 ATTACH_RETURN(ENXIO); 2681 } 2682 /* 2683 * Add in additional probe responses for more channels. We 2684 * are reusing the variable `target' for a channel loop counter. 2685 * Done here because of we need both the acquireLct and 2686 * acquireHrt data. 
2687 */ 2688 { PI2O_LCT_ENTRY Device; 2689 2690 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2691 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2692 ++Device) { 2693 if (Device->le_type == I2O_UNKNOWN) { 2694 continue; 2695 } 2696 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2697 if (Device->le_target > sc->ha_MaxId) { 2698 sc->ha_MaxId = Device->le_target; 2699 } 2700 if (Device->le_lun > sc->ha_MaxLun) { 2701 sc->ha_MaxLun = Device->le_lun; 2702 } 2703 } 2704 if (((Device->le_type & I2O_PORT) != 0) 2705 && (Device->le_bus <= MAX_CHANNEL)) { 2706 /* Do not increase MaxId for efficiency */ 2707 sc->ha_adapter_target[Device->le_bus] 2708 = Device->le_target; 2709 } 2710 } 2711 } 2712 2713 2714 /* 2715 * Print the HBA model number as inquired from the card. 2716 */ 2717 2718 printf ("asr%d:", unit); 2719 2720 if ((iq = (struct scsi_inquiry_data *)malloc ( 2721 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK)) 2722 != (struct scsi_inquiry_data *)NULL) { 2723 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2724 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2725 int posted = 0; 2726 2727 bzero (iq, sizeof(struct scsi_inquiry_data)); 2728 bzero (Message_Ptr 2729 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2730 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2731 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2732 2733 I2O_MESSAGE_FRAME_setVersionOffset( 2734 (PI2O_MESSAGE_FRAME)Message_Ptr, 2735 I2O_VERSION_11 2736 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2737 - sizeof(I2O_SG_ELEMENT)) 2738 / sizeof(U32)) << 4)); 2739 I2O_MESSAGE_FRAME_setMessageSize( 2740 (PI2O_MESSAGE_FRAME)Message_Ptr, 2741 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2742 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) 2743 / sizeof(U32)); 2744 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2745 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2746 I2O_MESSAGE_FRAME_setFunction( 2747 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2748 
I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2749 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2750 I2O_SCSI_SCB_EXEC); 2751 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2752 I2O_SCB_FLAG_ENABLE_DISCONNECT 2753 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2754 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2755 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2756 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2757 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2758 DPT_ORGANIZATION_ID); 2759 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2760 Message_Ptr->CDB[0] = INQUIRY; 2761 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data); 2762 if (Message_Ptr->CDB[4] == 0) { 2763 Message_Ptr->CDB[4] = 255; 2764 } 2765 2766 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2767 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2768 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2769 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2770 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2771 2772 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2773 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2774 sizeof(struct scsi_inquiry_data)); 2775 SG(&(Message_Ptr->SGL), 0, 2776 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2777 iq, sizeof(struct scsi_inquiry_data)); 2778 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2779 2780 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2781 printf (" "); 2782 ASR_prstring (iq->vendor, 8); 2783 ++posted; 2784 } 2785 if (iq->product[0] && (iq->product[0] != ' ')) { 2786 printf (" "); 2787 ASR_prstring (iq->product, 16); 2788 ++posted; 2789 } 2790 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2791 printf (" FW Rev. "); 2792 ASR_prstring (iq->revision, 4); 2793 ++posted; 2794 } 2795 free ((caddr_t)iq, M_TEMP); 2796 if (posted) { 2797 printf (","); 2798 } 2799 } 2800 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2801 (sc->ha_QueueSize > MAX_INBOUND) ? 
MAX_INBOUND : sc->ha_QueueSize); 2802 2803 /* 2804 * fill in the prototype cam_path. 2805 */ 2806 { 2807 int bus; 2808 union asr_ccb * ccb; 2809 2810 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) { 2811 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit); 2812 ATTACH_RETURN(ENOMEM); 2813 } 2814 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2815 struct cam_devq * devq; 2816 int QueueSize = sc->ha_QueueSize; 2817 2818 if (QueueSize > MAX_INBOUND) { 2819 QueueSize = MAX_INBOUND; 2820 } 2821 2822 /* 2823 * Create the device queue for our SIM(s). 2824 */ 2825 if ((devq = cam_simq_alloc(QueueSize)) == NULL) { 2826 continue; 2827 } 2828 2829 /* 2830 * Construct our first channel SIM entry 2831 */ 2832 sc->ha_sim[bus] = cam_sim_alloc( 2833 asr_action, asr_poll, "asr", sc, 2834 unit, 1, QueueSize, devq); 2835 if (sc->ha_sim[bus] == NULL) { 2836 continue; 2837 } 2838 2839 if (xpt_bus_register(sc->ha_sim[bus], bus) 2840 != CAM_SUCCESS) { 2841 cam_sim_free(sc->ha_sim[bus], 2842 /*free_devq*/TRUE); 2843 sc->ha_sim[bus] = NULL; 2844 continue; 2845 } 2846 2847 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL, 2848 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD, 2849 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2850 xpt_bus_deregister( 2851 cam_sim_path(sc->ha_sim[bus])); 2852 cam_sim_free(sc->ha_sim[bus], 2853 /*free_devq*/TRUE); 2854 sc->ha_sim[bus] = NULL; 2855 continue; 2856 } 2857 xpt_setup_ccb(&(ccb->ccb_h), 2858 sc->ha_path[bus], /*priority*/5); 2859 ccb->ccb_h.func_code = XPT_SASYNC_CB; 2860 ccb->csa.event_enable = AC_LOST_DEVICE; 2861 ccb->csa.callback = asr_async; 2862 ccb->csa.callback_arg = sc->ha_sim[bus]; 2863 xpt_action((union ccb *)ccb); 2864 } 2865 asr_free_ccb (ccb); 2866 } 2867 /* 2868 * Generate the device node information 2869 */ 2870 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit); 2871 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1)); 2872 ATTACH_RETURN(0); 2873} /* asr_attach */ 2874 2875#if 
(!defined(UNREFERENCED_PARAMETER))
# define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

/*
 *	CAM asynchronous event callback.  Registered for AC_LOST_DEVICE in
 *	asr_attach but currently ignores all events.
 */
STATIC void
asr_async(
	void		  * callback_arg,
	u_int32_t	    code,
	struct cam_path	  * path,
	void		  * arg)
{
	UNREFERENCED_PARAMETER(callback_arg);
	UNREFERENCED_PARAMETER(code);
	UNREFERENCED_PARAMETER(path);
	UNREFERENCED_PARAMETER(arg);
} /* asr_async */

/*
 *	CAM poll entry point: service completions by running the
 *	interrupt handler synchronously.
 */
STATIC void
asr_poll(
	IN struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */

/*
 *	CAM action entry point: dispatch on the CCB function code.
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb	  * ccb)
{
	struct Asr_softc  * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB for the completion side */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/* Translate the CCB into an I2O message frame; NULL means
		 * no valid TID exists for the addressed device. */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			/* Adapter FIFO full: ask CAM to requeue */
			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
				++sc->ha_performance.command_too_busy;
#endif
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 *	referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ...
		 * NOTE(review): this resets the whole HBA (all busses) for a
		 * single-device reset request — confirm intended. */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	/* NOTE(review): REPORT_LUNS is a SCSI opcode, not a CAM func_code;
	 * its presence in this switch looks accidental — verify. */
#	if (defined(REPORT_LUNS))
	case REPORT_LUNS:
#	endif
	case XPT_ABORT:		/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;	/* NOTE(review): computed but unused */

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/tagged 40MHz capabilities */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Conventional size-tiered fake geometry */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */

#ifdef ASR_MEASURE_PERFORMANCE
#define WRITE_OP 1
#define READ_OP 2
/* Shorthands for the per-size min/max accumulators, keyed by `index' */
#define min_submitR sc->ha_performance.read_by_size_min_time[index]
#define max_submitR sc->ha_performance.read_by_size_max_time[index]
#define min_submitW sc->ha_performance.write_by_size_min_time[index]
#define max_submitW sc->ha_performance.write_by_size_max_time[index]

/*
 *	Accumulate per-I/O-size performance statistics: count, total,
 *	minimum and maximum submit time, separately for reads and writes.
 *	A submitted_time of 0xffffffff means "unknown" and only bumps the
 *	count.
 */
STATIC INLINE void
asr_IObySize(
	IN Asr_softc_t * sc,
	IN u_int32_t submitted_time,
	IN int op,
	IN int index)
{
	struct timeval submitted_timeval;

	submitted_timeval.tv_sec = 0;
	submitted_timeval.tv_usec = submitted_time;

	if ( op == READ_OP ) {
		++sc->ha_performance.read_by_size_count[index];

		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &sc->ha_performance.read_by_size_total_time[index],
			  &submitted_timeval);
			/* min of 0 means "not yet recorded" */
			if ( (min_submitR == 0)
			 || (submitted_time < min_submitR) ) {
				min_submitR = submitted_time;
			}

			if ( submitted_time > max_submitR ) {
				max_submitR = submitted_time;
			}
		}
	} else {
		++sc->ha_performance.write_by_size_count[index];
if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &sc->ha_performance.write_by_size_total_time[index],
			  &submitted_timeval);
			/* min of 0 means "not yet recorded" */
			if ( (submitted_time < min_submitW)
			 || (min_submitW == 0) ) {
				min_submitW = submitted_time;
			}

			if ( submitted_time > max_submitW ) {
				max_submitW = submitted_time;
			}
		}
	}
} /* asr_IObySize */
#endif

/*
 *	Handle processing of current CCB as pointed to by the Status.
 *	Drains the outbound (reply) FIFO; returns non-zero if at least one
 *	reply was processed.
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb			    * ccb;
		U32				      ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME   Reply;

		/* Read the FIFO twice: a first EMPTY_QUEUE read may be a
		 * transient, the second confirms the queue is drained. */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Convert the physical reply offset back to a KVA within
		 * the contiguous ha_Msgs pool. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 *	We do not need any (optional byteswapping) method access to
		 *	the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE	 Message_Ptr;
			U32			 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *	Get the Original Message Frame's address, and get
			 *	it's Transaction Context into our space. (Currently
			 *	unused at original authorship, but better to be
			 *	safe than sorry). Straight copy means that we
			 *	need not concern ourselves with the (optional
			 *	byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Virt->Address + MessageOffset))
			      ->TransactionContext;
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 *	64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 *	Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *	Modify the message frame to contain a NOP and
			 *	re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#			if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#			endif
			/*
			 *	Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Virt->Address + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *	Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 *	Asynchronous command with no return requirements,
		 *	and a generic handler for immunity against odd error
		 *	returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 *	Return Reply so that it can be used for the
			 *	next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah!
(and stop timeouts) */ 3255 ASR_ccbRemove (sc, ccb); 3256 3257 switch ( 3258 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode( 3259 &(Reply->StdReplyFrame))) { 3260 3261 case I2O_SCSI_DSC_SUCCESS: 3262 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3263 ccb->ccb_h.status |= CAM_REQ_CMP; 3264 break; 3265 3266 case I2O_SCSI_DSC_CHECK_CONDITION: 3267 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3268 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID; 3269 break; 3270 3271 case I2O_SCSI_DSC_BUSY: 3272 /* FALLTHRU */ 3273 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY: 3274 /* FALLTHRU */ 3275 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET: 3276 /* FALLTHRU */ 3277 case I2O_SCSI_HBA_DSC_BUS_BUSY: 3278 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3279 ccb->ccb_h.status |= CAM_SCSI_BUSY; 3280 break; 3281 3282 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT: 3283 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3284 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 3285 break; 3286 3287 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT: 3288 /* FALLTHRU */ 3289 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT: 3290 /* FALLTHRU */ 3291 case I2O_SCSI_HBA_DSC_LUN_INVALID: 3292 /* FALLTHRU */ 3293 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID: 3294 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3295 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 3296 break; 3297 3298 case I2O_SCSI_HBA_DSC_DATA_OVERRUN: 3299 /* FALLTHRU */ 3300 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR: 3301 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3302 ccb->ccb_h.status |= CAM_DATA_RUN_ERR; 3303 break; 3304 3305 default: 3306 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 3307 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 3308 break; 3309 } 3310 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) { 3311 ccb->csio.resid -= 3312 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount( 3313 Reply); 3314 } 3315 3316#ifdef ASR_MEASURE_PERFORMANCE 3317 { 3318 struct timeval endTime; 3319 u_int32_t submitted_time; 3320 u_int32_t size; 3321 int op_type; 3322 int startTimeIndex; 3323 3324 --sc->ha_submitted_ccbs_count; 3325 startTimeIndex 3326 = 
(int)Reply->StdReplyFrame.TransactionContext; 3327 if (-1 != startTimeIndex) { 3328 /* Compute the time spent in device/adapter */ 3329 microtime(&endTime); 3330 submitted_time = asr_time_delta(sc->ha_timeQ[ 3331 startTimeIndex], endTime); 3332 /* put the startTimeIndex back on free list */ 3333 ENQ_TIMEQ_FREE_LIST(startTimeIndex, 3334 sc->ha_timeQFreeList, 3335 sc->ha_timeQFreeHead, 3336 sc->ha_timeQFreeTail); 3337 } else { 3338 submitted_time = 0xffffffff; 3339 } 3340 3341#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]] 3342#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]] 3343 if (submitted_time != 0xffffffff) { 3344 if ( maxctime < submitted_time ) { 3345 maxctime = submitted_time; 3346 } 3347 if ( (minctime == 0) 3348 || (minctime > submitted_time) ) { 3349 minctime = submitted_time; 3350 } 3351 3352 if ( sc->ha_performance.max_submit_time 3353 < submitted_time ) { 3354 sc->ha_performance.max_submit_time 3355 = submitted_time; 3356 } 3357 if ( sc->ha_performance.min_submit_time == 0 3358 || sc->ha_performance.min_submit_time 3359 > submitted_time) { 3360 sc->ha_performance.min_submit_time 3361 = submitted_time; 3362 } 3363 3364 switch ( ccb->csio.cdb_io.cdb_bytes[0] ) { 3365 3366 case 0xa8: /* 12-byte READ */ 3367 /* FALLTHRU */ 3368 case 0x08: /* 6-byte READ */ 3369 /* FALLTHRU */ 3370 case 0x28: /* 10-byte READ */ 3371 op_type = READ_OP; 3372 break; 3373 3374 case 0x0a: /* 6-byte WRITE */ 3375 /* FALLTHRU */ 3376 case 0xaa: /* 12-byte WRITE */ 3377 /* FALLTHRU */ 3378 case 0x2a: /* 10-byte WRITE */ 3379 op_type = WRITE_OP; 3380 break; 3381 3382 default: 3383 op_type = 0; 3384 break; 3385 } 3386 3387 if ( op_type != 0 ) { 3388 struct scsi_rw_big * cmd; 3389 3390 cmd = (struct scsi_rw_big *) 3391 &(ccb->csio.cdb_io); 3392 3393 size = (((u_int32_t) cmd->length2 << 8) 3394 | ((u_int32_t) cmd->length1)) << 9; 3395 3396 switch ( size ) { 3397 3398 case 512: 3399 asr_IObySize(sc, 3400 
submitted_time, op_type, 3401 SIZE_512); 3402 break; 3403 3404 case 1024: 3405 asr_IObySize(sc, 3406 submitted_time, op_type, 3407 SIZE_1K); 3408 break; 3409 3410 case 2048: 3411 asr_IObySize(sc, 3412 submitted_time, op_type, 3413 SIZE_2K); 3414 break; 3415 3416 case 4096: 3417 asr_IObySize(sc, 3418 submitted_time, op_type, 3419 SIZE_4K); 3420 break; 3421 3422 case 8192: 3423 asr_IObySize(sc, 3424 submitted_time, op_type, 3425 SIZE_8K); 3426 break; 3427 3428 case 16384: 3429 asr_IObySize(sc, 3430 submitted_time, op_type, 3431 SIZE_16K); 3432 break; 3433 3434 case 32768: 3435 asr_IObySize(sc, 3436 submitted_time, op_type, 3437 SIZE_32K); 3438 break; 3439 3440 case 65536: 3441 asr_IObySize(sc, 3442 submitted_time, op_type, 3443 SIZE_64K); 3444 break; 3445 3446 default: 3447 if ( size > (1 << 16) ) { 3448 asr_IObySize(sc, 3449 submitted_time, 3450 op_type, 3451 SIZE_BIGGER); 3452 } else { 3453 asr_IObySize(sc, 3454 submitted_time, 3455 op_type, 3456 SIZE_OTHER); 3457 } 3458 break; 3459 } 3460 } 3461 } 3462 } 3463#endif 3464 /* Sense data in reply packet */ 3465 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) { 3466 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply); 3467 3468 if (size) { 3469 if (size > sizeof(ccb->csio.sense_data)) { 3470 size = sizeof(ccb->csio.sense_data); 3471 } 3472 if (size > I2O_SCSI_SENSE_DATA_SZ) { 3473 size = I2O_SCSI_SENSE_DATA_SZ; 3474 } 3475 if ((ccb->csio.sense_len) 3476 && (size > ccb->csio.sense_len)) { 3477 size = ccb->csio.sense_len; 3478 } 3479 bcopy ((caddr_t)Reply->SenseData, 3480 (caddr_t)&(ccb->csio.sense_data), size); 3481 } 3482 } 3483 3484 /* 3485 * Return Reply so that it can be used for the next command 3486 * since we have no more need for it now 3487 */ 3488 sc->ha_Virt->FromFIFO = ReplyOffset; 3489 3490 if (ccb->ccb_h.path) { 3491 xpt_done ((union ccb *)ccb); 3492 } else { 3493 wakeup ((caddr_t)ccb); 3494 } 3495 } 3496#ifdef ASR_MEASURE_PERFORMANCE 3497 { 3498 u_int32_t result; 3499 3500 
		/*
		 * Measure how long this interrupt pass took and fold it
		 * into the min/max interrupt-latency counters.
		 */
		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		/* 0xffffffff is asr_time_delta's "senseless times" marker */
		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			/* A zero minimum means "not yet recorded" */
			if ( (sc->ha_performance.min_intr_time == 0)
			 || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */

#undef QueueSize /* Grrrr */
#undef SG_Size	 /* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * included here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
#include "dev/asr/osd_unix.h"

/* The control device's minor number selects the adapter instance */
#define asr_unit(dev)	  minor(dev)

/*
 *	Map a control device to its softc by walking the driver's global
 * adapter list (Asr_softc) until the CAM unit number of an adapter's
 * first SIM matches the device's minor number.  Returns NULL when no
 * adapter matches.
 */
STATIC INLINE Asr_softc_t *
ASR_get_sc (
	IN dev_t	  dev)
{
	int		  unit = asr_unit(dev);
	OUT Asr_softc_t * sc = Asr_softc;

	/*
	 * NOTE(review): assumes ha_sim[0] is valid for every adapter on
	 * the list -- confirm against attach ordering.
	 */
	while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
		sc = sc->ha_next;
	}
	return (sc);
} /* ASR_get_sc */

/*
 * Non-zero while any asr control device is open.  A single flag shared
 * across all adapters: only one opener at a time, driver-wide.
 */
STATIC u_int8_t ASR_ctlr_held;

/*
 *	Control device open entry point.  Enforces a single,
 * superuser-only opener: ENODEV if the minor number maps to no
 * adapter, EBUSY if the control interface is already held, otherwise
 * the suser() verdict (0 on success).
 */
STATIC int
asr_open(
	IN dev_t	  dev,
	int32_t		  flags,
	int32_t		  ifmt,
	IN struct proc	* proc)
{
	int		  s;
	OUT int		  error;
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);

	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	/* Block CAM interrupts around the test-and-set of the lock */
	s = splcam ();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser(proc)) == 0) {
		++ASR_ctlr_held;
	}
	splx(s);
	return (error);
} /* asr_open */

/*
 *	Control device close entry point; simply drops the single-open
 * lock taken in asr_open.  Always succeeds.
 */
STATIC int
asr_close(
	dev_t		  dev,
	int		  flags,
	int		  ifmt,
	struct proc	* proc)
{
	UNREFERENCED_PARAMETER(dev);
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);
	UNREFERENCED_PARAMETER(proc);

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */


/*-------------------------------------------------------------------------*/
/*                    Function ASR_queue_i                                 */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/*     Asr_softc_t *      : HBA miniport driver's adapter data storage.    */
/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command         */
/*     I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure      */
/*                                                                         */
/* This Function Will Take The User Request Packet And Convert It To An    */
/* I2O MSG And Send It Off To The Adapter.                                 */
/*                                                                         */
/* Return : 0 For OK, Error Code Otherwise                                 */
/*-------------------------------------------------------------------------*/
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t	* sc,
	INOUT PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int		MessageSizeInBytes;
	int		ReplySizeInBytes;
	int		error;
	int		s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t	  UserSpace;
		I2O_FLAGS_COUNT FlagsCount;
		char	  KernelSpace[sizeof(long)];
	}		* elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* A blinking LED means faulted firmware; refuse the command */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
== (PI2O_MESSAGE_FRAME)NULL) { 3641 debug_usr_cmd_printf ( 3642 "Failed to acquire I2O_MESSAGE_FRAME memory\n"); 3643 return (ENOMEM); 3644 } 3645 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr, 3646 sizeof(I2O_MESSAGE_FRAME))) != 0) { 3647 free (Message_Ptr, M_TEMP); 3648 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error); 3649 return (error); 3650 } 3651 /* Acquire information to determine type of packet */ 3652 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2); 3653 /* The offset of the reply information within the user packet */ 3654 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet 3655 + MessageSizeInBytes); 3656 3657 /* Check if the message is a synchronous initialization command */ 3658 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr); 3659 free (Message_Ptr, M_TEMP); 3660 switch (s) { 3661 3662 case I2O_EXEC_IOP_RESET: 3663 { U32 status; 3664 3665 status = ASR_resetIOP(sc->ha_Virt); 3666 ReplySizeInBytes = sizeof(status); 3667 debug_usr_cmd_printf ("resetIOP done\n"); 3668 return (copyout ((caddr_t)&status, (caddr_t)Reply, 3669 ReplySizeInBytes)); 3670 } 3671 3672 case I2O_EXEC_STATUS_GET: 3673 { I2O_EXEC_STATUS_GET_REPLY status; 3674 3675 if (ASR_getStatus (sc->ha_Virt, &status) 3676 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) { 3677 debug_usr_cmd_printf ("getStatus failed\n"); 3678 return (ENXIO); 3679 } 3680 ReplySizeInBytes = sizeof(status); 3681 debug_usr_cmd_printf ("getStatus done\n"); 3682 return (copyout ((caddr_t)&status, (caddr_t)Reply, 3683 ReplySizeInBytes)); 3684 } 3685 3686 case I2O_EXEC_OUTBOUND_INIT: 3687 { U32 status; 3688 3689 status = ASR_initOutBound(sc); 3690 ReplySizeInBytes = sizeof(status); 3691 debug_usr_cmd_printf ("intOutBound done\n"); 3692 return (copyout ((caddr_t)&status, (caddr_t)Reply, 3693 ReplySizeInBytes)); 3694 } 3695 } 3696 3697 /* Determine if the message size is valid */ 3698 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME)) 3699 || (MAX_INBOUND_SIZE < 
MessageSizeInBytes)) { 3700 debug_usr_cmd_printf ("Packet size %d incorrect\n", 3701 MessageSizeInBytes); 3702 return (EINVAL); 3703 } 3704 3705 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes, 3706 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) { 3707 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n", 3708 MessageSizeInBytes); 3709 return (ENOMEM); 3710 } 3711 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr, 3712 MessageSizeInBytes)) != 0) { 3713 free (Message_Ptr, M_TEMP); 3714 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n", 3715 MessageSizeInBytes, error); 3716 return (error); 3717 } 3718 3719 /* Check the size of the reply frame, and start constructing */ 3720 3721 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc ( 3722 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) 3723 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 3724 free (Message_Ptr, M_TEMP); 3725 debug_usr_cmd_printf ( 3726 "Failed to acquire I2O_MESSAGE_FRAME memory\n"); 3727 return (ENOMEM); 3728 } 3729 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr, 3730 sizeof(I2O_MESSAGE_FRAME))) != 0) { 3731 free (Reply_Ptr, M_TEMP); 3732 free (Message_Ptr, M_TEMP); 3733 debug_usr_cmd_printf ( 3734 "Failed to copy in reply frame, errno=%d\n", 3735 error); 3736 return (error); 3737 } 3738 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize( 3739 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2); 3740 free (Reply_Ptr, M_TEMP); 3741 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) { 3742 free (Message_Ptr, M_TEMP); 3743 debug_usr_cmd_printf ( 3744 "Failed to copy in reply frame[%d], errno=%d\n", 3745 ReplySizeInBytes, error); 3746 return (EINVAL); 3747 } 3748 3749 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc ( 3750 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)) 3751 ? 
ReplySizeInBytes 3752 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)), 3753 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) { 3754 free (Message_Ptr, M_TEMP); 3755 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n", 3756 ReplySizeInBytes); 3757 return (ENOMEM); 3758 } 3759 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes); 3760 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext 3761 = Message_Ptr->InitiatorContext; 3762 Reply_Ptr->StdReplyFrame.TransactionContext 3763 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext; 3764 I2O_MESSAGE_FRAME_setMsgFlags( 3765 &(Reply_Ptr->StdReplyFrame.StdMessageFrame), 3766 I2O_MESSAGE_FRAME_getMsgFlags( 3767 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) 3768 | I2O_MESSAGE_FLAGS_REPLY); 3769 3770 /* Check if the message is a special case command */ 3771 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) { 3772 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */ 3773 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset( 3774 Message_Ptr) & 0xF0) >> 2)) { 3775 free (Message_Ptr, M_TEMP); 3776 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode( 3777 &(Reply_Ptr->StdReplyFrame), (ASR_setSysTab(sc) != CAM_REQ_CMP)); 3778 I2O_MESSAGE_FRAME_setMessageSize( 3779 &(Reply_Ptr->StdReplyFrame.StdMessageFrame), 3780 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)); 3781 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply, 3782 ReplySizeInBytes); 3783 free (Reply_Ptr, M_TEMP); 3784 return (error); 3785 } 3786 } 3787 3788 /* Deal in the general case */ 3789 /* First allocate and optionally copy in each scatter gather element */ 3790 SLIST_INIT(&sgList); 3791 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) { 3792 PI2O_SGE_SIMPLE_ELEMENT sg; 3793 3794 /* 3795 * since this code is reused in several systems, code 3796 * efficiency is greater by using a shift operation rather 3797 * than a divide by sizeof(u_int32_t). 
3798 */ 3799 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr 3800 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) 3801 >> 2)); 3802 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr) 3803 + MessageSizeInBytes)) { 3804 caddr_t v; 3805 int len; 3806 3807 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount)) 3808 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) { 3809 error = EINVAL; 3810 break; 3811 } 3812 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount)); 3813 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n", 3814 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr 3815 + ((I2O_MESSAGE_FRAME_getVersionOffset( 3816 Message_Ptr) & 0xF0) >> 2)), 3817 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len); 3818 3819 if ((elm = (struct ioctlSgList_S *)malloc ( 3820 sizeof(*elm) - sizeof(elm->KernelSpace) + len, 3821 M_TEMP, M_WAITOK)) 3822 == (struct ioctlSgList_S *)NULL) { 3823 debug_usr_cmd_printf ( 3824 "Failed to allocate SG[%d]\n", len); 3825 error = ENOMEM; 3826 break; 3827 } 3828 SLIST_INSERT_HEAD(&sgList, elm, link); 3829 elm->FlagsCount = sg->FlagsCount; 3830 elm->UserSpace = (caddr_t) 3831 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg)); 3832 v = elm->KernelSpace; 3833 /* Copy in outgoing data (DIR bit could be invalid) */ 3834 if ((error = copyin (elm->UserSpace, (caddr_t)v, len)) 3835 != 0) { 3836 break; 3837 } 3838 /* 3839 * If the buffer is not contiguous, lets 3840 * break up the scatter/gather entries. 
3841 */ 3842 while ((len > 0) 3843 && (sg < (PI2O_SGE_SIMPLE_ELEMENT) 3844 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) { 3845 int next, base, span; 3846 3847 span = 0; 3848 next = base = KVTOPHYS(v); 3849 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, 3850 base); 3851 3852 /* How far can we go physically contiguously */ 3853 while ((len > 0) && (base == next)) { 3854 int size; 3855 3856 next = trunc_page(base) + PAGE_SIZE; 3857 size = next - base; 3858 if (size > len) { 3859 size = len; 3860 } 3861 span += size; 3862 v += size; 3863 len -= size; 3864 base = KVTOPHYS(v); 3865 } 3866 3867 /* Construct the Flags */ 3868 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), 3869 span); 3870 { 3871 int flags = I2O_FLAGS_COUNT_getFlags( 3872 &(elm->FlagsCount)); 3873 /* Any remaining length? */ 3874 if (len > 0) { 3875 flags &= 3876 ~(I2O_SGL_FLAGS_END_OF_BUFFER 3877 | I2O_SGL_FLAGS_LAST_ELEMENT); 3878 } 3879 I2O_FLAGS_COUNT_setFlags( 3880 &(sg->FlagsCount), flags); 3881 } 3882 3883 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n", 3884 sg - (PI2O_SGE_SIMPLE_ELEMENT) 3885 ((char *)Message_Ptr 3886 + ((I2O_MESSAGE_FRAME_getVersionOffset( 3887 Message_Ptr) & 0xF0) >> 2)), 3888 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), 3889 span); 3890 if (len <= 0) { 3891 break; 3892 } 3893 3894 /* 3895 * Incrementing requires resizing of the 3896 * packet, and moving up the existing SG 3897 * elements. 
3898 */ 3899 ++sg; 3900 MessageSizeInBytes += sizeof(*sg); 3901 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 3902 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr) 3903 + (sizeof(*sg) / sizeof(U32))); 3904 { 3905 PI2O_MESSAGE_FRAME NewMessage_Ptr; 3906 3907 if ((NewMessage_Ptr 3908 = (PI2O_MESSAGE_FRAME) 3909 malloc (MessageSizeInBytes, 3910 M_TEMP, M_WAITOK)) 3911 == (PI2O_MESSAGE_FRAME)NULL) { 3912 debug_usr_cmd_printf ( 3913 "Failed to acquire frame[%d] memory\n", 3914 MessageSizeInBytes); 3915 error = ENOMEM; 3916 break; 3917 } 3918 span = ((caddr_t)sg) 3919 - (caddr_t)Message_Ptr; 3920 bcopy ((caddr_t)Message_Ptr, 3921 (caddr_t)NewMessage_Ptr, span); 3922 bcopy ((caddr_t)(sg-1), 3923 ((caddr_t)NewMessage_Ptr) + span, 3924 MessageSizeInBytes - span); 3925 free (Message_Ptr, M_TEMP); 3926 sg = (PI2O_SGE_SIMPLE_ELEMENT) 3927 (((caddr_t)NewMessage_Ptr) + span); 3928 Message_Ptr = NewMessage_Ptr; 3929 } 3930 } 3931 if ((error) 3932 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount)) 3933 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) { 3934 break; 3935 } 3936 ++sg; 3937 } 3938 if (error) { 3939 while ((elm = SLIST_FIRST(&sgList)) 3940 != (struct ioctlSgList_S *)NULL) { 3941 SLIST_REMOVE_HEAD(&sgList,link); 3942 free (elm, M_TEMP); 3943 } 3944 free (Reply_Ptr, M_TEMP); 3945 free (Message_Ptr, M_TEMP); 3946 return (error); 3947 } 3948 } 3949 3950 debug_usr_cmd_printf ("Inbound: "); 3951 debug_usr_cmd_dump_message(Message_Ptr); 3952 3953 /* Send the command */ 3954 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) { 3955 /* Free up in-kernel buffers */ 3956 while ((elm = SLIST_FIRST(&sgList)) 3957 != (struct ioctlSgList_S *)NULL) { 3958 SLIST_REMOVE_HEAD(&sgList,link); 3959 free (elm, M_TEMP); 3960 } 3961 free (Reply_Ptr, M_TEMP); 3962 free (Message_Ptr, M_TEMP); 3963 return (ENOMEM); 3964 } 3965 3966 /* 3967 * We do not need any (optional byteswapping) method access to 3968 * the Initiator context field. 
3969 */ 3970 I2O_MESSAGE_FRAME_setInitiatorContext64( 3971 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb); 3972 3973 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 3974 3975 free (Message_Ptr, M_TEMP); 3976 3977 /* 3978 * Wait for the board to report a finished instruction. 3979 */ 3980 s = splcam(); 3981 while (ccb->ccb_h.status == CAM_REQ_INPROG) { 3982 if (ASR_getBlinkLedCode(sc)) { 3983 /* Reset Adapter */ 3984 printf ("asr%d: Blink LED 0x%x resetting adapter\n", 3985 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), 3986 ASR_getBlinkLedCode(sc)); 3987 ASR_reset (sc); 3988 splx(s); 3989 /* Command Cleanup */ 3990 ASR_ccbRemove(sc, ccb); 3991 /* Free up in-kernel buffers */ 3992 while ((elm = SLIST_FIRST(&sgList)) 3993 != (struct ioctlSgList_S *)NULL) { 3994 SLIST_REMOVE_HEAD(&sgList,link); 3995 free (elm, M_TEMP); 3996 } 3997 free (Reply_Ptr, M_TEMP); 3998 asr_free_ccb(ccb); 3999 return (EIO); 4000 } 4001 /* Check every second for BlinkLed */ 4002 /* There is no PRICAM, but outwardly PRIBIO is functional */ 4003 tsleep((caddr_t)ccb, PRIBIO, "asr", hz); 4004 } 4005 splx(s); 4006 4007 debug_usr_cmd_printf ("Outbound: "); 4008 debug_usr_cmd_dump_message(Reply_Ptr); 4009 4010 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode( 4011 &(Reply_Ptr->StdReplyFrame), (ccb->ccb_h.status != CAM_REQ_CMP)); 4012 4013 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 4014 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) { 4015 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr, 4016 ccb->csio.dxfer_len - ccb->csio.resid); 4017 } 4018 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes 4019 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 4020 - I2O_SCSI_SENSE_DATA_SZ))) { 4021 int size = ReplySizeInBytes 4022 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME) 4023 - I2O_SCSI_SENSE_DATA_SZ; 4024 4025 if (size > sizeof(ccb->csio.sense_data)) { 4026 size = sizeof(ccb->csio.sense_data); 4027 } 4028 bcopy ((caddr_t)&(ccb->csio.sense_data), 
(caddr_t)Reply_Ptr->SenseData, 4029 size); 4030 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount( 4031 Reply_Ptr, size); 4032 } 4033 4034 /* Free up in-kernel buffers */ 4035 while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) { 4036 /* Copy out as necessary */ 4037 if ((error == 0) 4038 /* DIR bit considered `valid', error due to ignorance works */ 4039 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount)) 4040 & I2O_SGL_FLAGS_DIR) == 0)) { 4041 error = copyout ((caddr_t)(elm->KernelSpace), 4042 elm->UserSpace, 4043 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount))); 4044 } 4045 SLIST_REMOVE_HEAD(&sgList,link); 4046 free (elm, M_TEMP); 4047 } 4048 if (error == 0) { 4049 /* Copy reply frame to user space */ 4050 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply, 4051 ReplySizeInBytes); 4052 } 4053 free (Reply_Ptr, M_TEMP); 4054 asr_free_ccb(ccb); 4055 4056 return (error); 4057} /* ASR_queue_i */ 4058 4059/*----------------------------------------------------------------------*/ 4060/* Function asr_ioctl */ 4061/*----------------------------------------------------------------------*/ 4062/* The parameters passed to this function are : */ 4063/* dev : Device number. */ 4064/* cmd : Ioctl Command */ 4065/* data : User Argument Passed In. 
*/ 4066/* flag : Mode Parameter */ 4067/* proc : Process Parameter */ 4068/* */ 4069/* This function is the user interface into this adapter driver */ 4070/* */ 4071/* Return : zero if OK, error code if not */ 4072/*----------------------------------------------------------------------*/ 4073 4074STATIC int 4075asr_ioctl( 4076 IN dev_t dev, 4077 IN u_long cmd, 4078 INOUT caddr_t data, 4079 int flag, 4080 struct proc * proc) 4081{ 4082 int i, j; 4083 OUT int error = 0; 4084 Asr_softc_t * sc = ASR_get_sc (dev); 4085 UNREFERENCED_PARAMETER(flag); 4086 UNREFERENCED_PARAMETER(proc); 4087 4088 if (sc != (Asr_softc_t *)NULL) 4089 switch(cmd) { 4090 4091 case DPT_SIGNATURE: 4092# if (dsDescription_size != 50) 4093 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16): 4094# endif 4095 if (cmd & 0xFFFF0000) { 4096 (void)bcopy ((caddr_t)(&ASR_sig), data, 4097 sizeof(dpt_sig_S)); 4098 return (0); 4099 } 4100 /* Traditional version of the ioctl interface */ 4101 case DPT_SIGNATURE & 0x0000FFFF: 4102 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data), 4103 sizeof(dpt_sig_S))); 4104 4105 /* Traditional version of the ioctl interface */ 4106 case DPT_CTRLINFO & 0x0000FFFF: 4107 case DPT_CTRLINFO: { 4108 struct { 4109 u_int16_t length; 4110 u_int16_t drvrHBAnum; 4111 u_int32_t baseAddr; 4112 u_int16_t blinkState; 4113 u_int8_t pciBusNum; 4114 u_int8_t pciDeviceNum; 4115 u_int16_t hbaFlags; 4116 u_int16_t Interrupt; 4117 u_int32_t reserved1; 4118 u_int32_t reserved2; 4119 u_int32_t reserved3; 4120 } CtlrInfo; 4121 4122 bzero (&CtlrInfo, sizeof(CtlrInfo)); 4123 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t); 4124 CtlrInfo.drvrHBAnum = asr_unit(dev); 4125 CtlrInfo.baseAddr = (u_long)sc->ha_Base; 4126 i = ASR_getBlinkLedCode (sc); 4127 if (i == -1) { 4128 i = 0; 4129 } 4130 CtlrInfo.blinkState = i; 4131 CtlrInfo.pciBusNum = sc->ha_pciBusNum; 4132 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum; 4133#define FLG_OSD_PCI_VALID 0x0001 4134#define FLG_OSD_DMA 0x0002 4135#define 
FLG_OSD_I2O 0x0004 4136 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O; 4137 CtlrInfo.Interrupt = sc->ha_irq; 4138 if (cmd & 0xFFFF0000) { 4139 bcopy (&CtlrInfo, data, sizeof(CtlrInfo)); 4140 } else { 4141 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo)); 4142 } 4143 } return (error); 4144 4145 /* Traditional version of the ioctl interface */ 4146 case DPT_SYSINFO & 0x0000FFFF: 4147 case DPT_SYSINFO: { 4148 sysInfo_S Info; 4149 caddr_t c_addr; 4150 /* Kernel Specific ptok `hack' */ 4151# define ptok(a) ((char *)(a) + KERNBASE) 4152 4153 bzero (&Info, sizeof(Info)); 4154 4155 outb (0x70, 0x12); 4156 i = inb(0x71); 4157 j = i >> 4; 4158 if (i == 0x0f) { 4159 outb (0x70, 0x19); 4160 j = inb (0x71); 4161 } 4162 Info.drive0CMOS = j; 4163 4164 j = i & 0x0f; 4165 if (i == 0x0f) { 4166 outb (0x70, 0x1a); 4167 j = inb (0x71); 4168 } 4169 Info.drive1CMOS = j; 4170 4171 Info.numDrives = *((char *)ptok(0x475)); 4172 4173 Info.processorFamily = ASR_sig.dsProcessorFamily; 4174 switch (cpu) { 4175 case CPU_386SX: case CPU_386: 4176 Info.processorType = PROC_386; break; 4177 case CPU_486SX: case CPU_486: 4178 Info.processorType = PROC_486; break; 4179 case CPU_586: 4180 Info.processorType = PROC_PENTIUM; break; 4181 case CPU_686: 4182 Info.processorType = PROC_SEXIUM; break; 4183 } 4184 Info.osType = OS_BSDI_UNIX; 4185 Info.osMajorVersion = osrelease[0] - '0'; 4186 Info.osMinorVersion = osrelease[2] - '0'; 4187 /* Info.osRevision = 0; */ 4188 /* Info.osSubRevision = 0; */ 4189 Info.busType = SI_PCI_BUS; 4190 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid 4191 | SI_OSversionValid |SI_BusTypeValid; 4192 4193 /* Go Out And Look For SmartROM */ 4194 for(i = 0; i < 3; ++i) { 4195 int k; 4196 4197 if (i == 0) { 4198 j = 0xC8000; 4199 } else if (i == 1) { 4200 j = 0xD8000; 4201 } else { 4202 j = 0xDC000; 4203 } 4204 c_addr = ptok(j); 4205 if (*((unsigned short *)c_addr) != 0xAA55) { 4206 continue; 4207 } 4208 if (*((u_long *)(c_addr + 6)) != 0x202053) { 
4209 continue; 4210 } 4211 if (*((u_long *)(c_addr + 10)) != 0x545044) { 4212 continue; 4213 } 4214 c_addr += 0x24; 4215 for (k = 0; k < 64; ++k) { 4216 if ((*((unsigned char *)(c_addr++)) == ' ') 4217 && (*((unsigned char *)(c_addr)) == 'v')) { 4218 break; 4219 } 4220 } 4221 if (k < 64) { 4222 Info.smartROMMajorVersion 4223 = *((unsigned char *)(c_addr += 3)) - '0'; 4224 Info.smartROMMinorVersion 4225 = *((unsigned char *)(c_addr += 2)); 4226 Info.smartROMRevision 4227 = *((unsigned char *)(++c_addr)); 4228 Info.flags |= SI_SmartROMverValid; 4229 break; 4230 } 4231 } 4232 if (i >= 3) { 4233 Info.flags |= SI_NO_SmartROM; 4234 } 4235 /* Get The Conventional Memory Size From CMOS */ 4236 outb (0x70, 0x16); 4237 j = inb (0x71); 4238 j <<= 8; 4239 outb (0x70, 0x15); 4240 j |= inb(0x71); 4241 Info.conventionalMemSize = j; 4242 4243 /* Get The Extended Memory Found At Power On From CMOS */ 4244 outb (0x70, 0x31); 4245 j = inb (0x71); 4246 j <<= 8; 4247 outb (0x70, 0x30); 4248 j |= inb(0x71); 4249 Info.extendedMemSize = j; 4250 Info.flags |= SI_MemorySizeValid; 4251 4252# if (defined(THIS_IS_BROKEN)) 4253 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */ 4254 if (Info.numDrives > 0) { 4255 /* 4256 * Get The Pointer From Int 41 For The First 4257 * Drive Parameters 4258 */ 4259 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4) 4260 + (unsigned)(*((unsigned short *)ptok(0x104+0))); 4261 /* 4262 * It appears that SmartROM's Int41/Int46 pointers 4263 * use memory that gets stepped on by the kernel 4264 * loading. We no longer have access to this 4265 * geometry information but try anyways (!?) 
4266 */ 4267 Info.drives[0].cylinders = *((unsigned char *)ptok(j)); 4268 ++j; 4269 Info.drives[0].cylinders += ((int)*((unsigned char *) 4270 ptok(j))) << 8; 4271 ++j; 4272 Info.drives[0].heads = *((unsigned char *)ptok(j)); 4273 j += 12; 4274 Info.drives[0].sectors = *((unsigned char *)ptok(j)); 4275 Info.flags |= SI_DriveParamsValid; 4276 if ((Info.drives[0].cylinders == 0) 4277 || (Info.drives[0].heads == 0) 4278 || (Info.drives[0].sectors == 0)) { 4279 Info.flags &= ~SI_DriveParamsValid; 4280 } 4281 if (Info.numDrives > 1) { 4282 /* 4283 * Get The Pointer From Int 46 For The 4284 * Second Drive Parameters 4285 */ 4286 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4) 4287 + (unsigned)(*((unsigned short *)ptok(0x118+0))); 4288 Info.drives[1].cylinders = *((unsigned char *) 4289 ptok(j)); 4290 ++j; 4291 Info.drives[1].cylinders += ((int) 4292 *((unsigned char *)ptok(j))) << 8; 4293 ++j; 4294 Info.drives[1].heads = *((unsigned char *) 4295 ptok(j)); 4296 j += 12; 4297 Info.drives[1].sectors = *((unsigned char *) 4298 ptok(j)); 4299 if ((Info.drives[1].cylinders == 0) 4300 || (Info.drives[1].heads == 0) 4301 || (Info.drives[1].sectors == 0)) { 4302 Info.flags &= ~SI_DriveParamsValid; 4303 } 4304 } 4305 } 4306# endif 4307 /* Copy Out The Info Structure To The User */ 4308 if (cmd & 0xFFFF0000) { 4309 bcopy (&Info, data, sizeof(Info)); 4310 } else { 4311 error = copyout (&Info, *(caddr_t *)data, sizeof(Info)); 4312 } 4313 return (error); } 4314 4315 /* Get The BlinkLED State */ 4316 case DPT_BLINKLED: 4317 i = ASR_getBlinkLedCode (sc); 4318 if (i == -1) { 4319 i = 0; 4320 } 4321 if (cmd & 0xFFFF0000) { 4322 bcopy ((caddr_t)(&i), data, sizeof(i)); 4323 } else { 4324 error = copyout (&i, *(caddr_t *)data, sizeof(i)); 4325 } 4326 break; 4327 4328 /* Get performance metrics */ 4329#ifdef ASR_MEASURE_PERFORMANCE 4330 case DPT_PERF_INFO: 4331 bcopy((caddr_t) &sc->ha_performance, data, 4332 sizeof(sc->ha_performance)); 4333 return (0); 4334#endif 4335 4336 /* Send 
/*
 *	Subtract one timeval structure from another, returning the
 * difference in usec.  It assumes that less than 4 billion usec
 * (~71 minutes) passed from start to end.  If the times are senseless
 * (end precedes start), 0xffffffff is returned.
 */
static u_int32_t
asr_time_delta(
	struct timeval	start,
	struct timeval	end)
{
	/* end before start: impossible interval, return the marker */
	if ((start.tv_sec > end.tv_sec)
	 || ((start.tv_sec == end.tv_sec)
	  && (start.tv_usec > end.tv_usec))) {
		return (0xffffffff);
	}
	/*
	 * Whole-second difference scaled to usec plus the (possibly
	 * negative) usec difference.  The previous code computed
	 *	(end.tv_sec - start.tv_sec) * 1000000
	 *	    + end.tv_usec + (1000000 - start.tv_usec)
	 * without decrementing the seconds term for the borrowed
	 * second, overstating every cross-second delta by exactly
	 * 1000000 usec and skewing all derived min/max statistics.
	 */
	return ((end.tv_sec - start.tv_sec) * 1000000
	  + end.tv_usec - start.tv_usec);
} /* asr_time_delta */