/* asr.c revision 72760 */
1/* $FreeBSD: head/sys/dev/asr/asr.c 72760 2001-02-20 10:30:22Z kris $ */ 2/* 3 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 4 * Copyright (c) 2000 Adaptec Corporation 5 * All rights reserved. 6 * 7 * TERMS AND CONDITIONS OF USE 8 * 9 * Redistribution and use in source form, with or without modification, are 10 * permitted provided that redistributions of source code must retain the 11 * above copyright notice, this list of conditions and the following disclaimer. 12 * 13 * This software is provided `as is' by Adaptec and any express or implied 14 * warranties, including, but not limited to, the implied warranties of 15 * merchantability and fitness for a particular purpose, are disclaimed. In no 16 * event shall Adaptec be liable for any direct, indirect, incidental, special, 17 * exemplary or consequential damages (including, but not limited to, 18 * procurement of substitute goods or services; loss of use, data, or profits; 19 * or business interruptions) however caused and on any theory of liability, 20 * whether in contract, strict liability, or tort (including negligence or 21 * otherwise) arising in any way out of the use of this driver software, even 22 * if advised of the possibility of such damage. 23 * 24 * SCSI I2O host adapter driver 25 * 26 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 27 * - The controller is not actually an ASR (Adaptec SCSI RAID) 28 * series that is visible, it's more of an internal code name. 29 * remove any visible references within reason for now. 30 * - bus_ptr->LUN was not correctly zeroed when initially 31 * allocated causing a possible panic of the operating system 32 * during boot. 33 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 34 * - Code always fails for ASR_getTid affecting performance. 
35 * - initiated a set of changes that resulted from a formal 36 * code inspection by Mark_Salyzyn@adaptec.com, 37 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 38 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 39 * Their findings were focussed on the LCT & TID handler, and 40 * all resulting changes were to improve code readability, 41 * consistency or have a positive effect on performance. 42 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 43 * - Passthrough returned an incorrect error. 44 * - Passthrough did not migrate the intrinsic scsi layer wakeup 45 * on command completion. 46 * - generate control device nodes using make_dev and delete_dev. 47 * - Performance affected by TID caching reallocing. 48 * - Made suggested changes by Justin_Gibbs@adaptec.com 49 * - use splcam instead of splbio. 50 * - use cam_imask instead of bio_imask. 51 * - use u_int8_t instead of u_char. 52 * - use u_int16_t instead of u_short. 53 * - use u_int32_t instead of u_long where appropriate. 54 * - use 64 bit context handler instead of 32 bit. 55 * - create_ccb should only allocate the worst case 56 * requirements for the driver since CAM may evolve 57 * making union ccb much larger than needed here. 58 * renamed create_ccb to asr_alloc_ccb. 59 * - go nutz justifying all debug prints as macros 60 * defined at the top and remove unsightly ifdefs. 61 * - INLINE STATIC viewed as confusing. Historically 62 * utilized to affect code performance and debug 63 * issues in OS, Compiler or OEM specific situations. 64 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 65 * - Ported from FreeBSD 2.2.X DPT I2O driver. 66 * changed struct scsi_xfer to union ccb/struct ccb_hdr 67 * changed variable name xs to ccb 68 * changed struct scsi_link to struct cam_path 69 * changed struct scsibus_data to struct cam_sim 70 * stopped using fordriver for holding on to the TID 71 * use proprietary packet creation instead of scsi_inquire 72 * CAM layer sends synchronize commands. 
73 */ 74 75#define ASR_VERSION 1 76#define ASR_REVISION '0' 77#define ASR_SUBREVISION '3' 78#define ASR_MONTH 7 79#define ASR_DAY 12 80#define ASR_YEAR 2000 - 1980 81 82/* 83 * Debug macros to resude the unsightly ifdefs 84 */ 85#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 86# define debug_asr_message(message) \ 87 { \ 88 u_int32_t * pointer = (u_int32_t *)message; \ 89 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\ 90 u_int32_t counter = 0; \ 91 \ 92 while (length--) { \ 93 printf ("%08lx%c", (u_long)*(pointer++), \ 94 (((++counter & 7) == 0) || (length == 0)) \ 95 ? '\n' \ 96 : ' '); \ 97 } \ 98 } 99#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 100 101#if (defined(DEBUG_ASR)) 102 /* Breaks on none STDC based compilers :-( */ 103# define debug_asr_printf(fmt,args...) printf(fmt, ##args) 104# define debug_asr_dump_message(message) debug_asr_message(message) 105# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 106 /* None fatal version of the ASSERT macro */ 107# if (defined(__STDC__)) 108# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__) 109# else 110# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__) 111# endif 112#else /* DEBUG_ASR */ 113# define debug_asr_printf(fmt,args...) 114# define debug_asr_dump_message(message) 115# define debug_asr_print_path(ccb) 116# define ASSERT(x) 117#endif /* DEBUG_ASR */ 118 119/* 120 * If DEBUG_ASR_CMD is defined: 121 * 0 - Display incoming SCSI commands 122 * 1 - add in a quick character before queueing. 123 * 2 - add in outgoing message frames. 124 */ 125#if (defined(DEBUG_ASR_CMD)) 126# define debug_asr_cmd_printf(fmt,args...) 
printf(fmt,##args) 127# define debug_asr_dump_ccb(ccb) \ 128 { \ 129 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \ 130 int len = ccb->csio.cdb_len; \ 131 \ 132 while (len) { \ 133 debug_asr_cmd_printf (" %02x", *(cp++)); \ 134 --len; \ 135 } \ 136 } 137# if (DEBUG_ASR_CMD > 0) 138# define debug_asr_cmd1_printf debug_asr_cmd_printf 139# else 140# define debug_asr_cmd1_printf(fmt,args...) 141# endif 142# if (DEBUG_ASR_CMD > 1) 143# define debug_asr_cmd2_printf debug_asr_cmd_printf 144# define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 145# else 146# define debug_asr_cmd2_printf(fmt,args...) 147# define debug_asr_cmd2_dump_message(message) 148# endif 149#else /* DEBUG_ASR_CMD */ 150# define debug_asr_cmd_printf(fmt,args...) 151# define debug_asr_cmd_dump_ccb(ccb) 152# define debug_asr_cmd1_printf(fmt,args...) 153# define debug_asr_cmd2_printf(fmt,args...) 154# define debug_asr_cmd2_dump_message(message) 155#endif /* DEBUG_ASR_CMD */ 156 157#if (defined(DEBUG_ASR_USR_CMD)) 158# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args) 159# define debug_usr_cmd_dump_message(message) debug_usr_message(message) 160#else /* DEBUG_ASR_USR_CMD */ 161# define debug_usr_cmd_printf(fmt,args...) 
162# define debug_usr_cmd_dump_message(message) 163#endif /* DEBUG_ASR_USR_CMD */ 164 165#define dsDescription_size 46 /* Snug as a bug in a rug */ 166#include "dev/asr/dptsig.h" 167 168static dpt_sig_S ASR_sig = { 169 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL, 170 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0, 171 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, 172 ADF_ALL_SC5, 173 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION, 174 ASR_MONTH, ASR_DAY, ASR_YEAR, 175/* 01234567890123456789012345678901234567890123456789 < 50 chars */ 176 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver" 177 /* ^^^^^ asr_attach alters these to match OS */ 178}; 179 180#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */ 181#include <sys/kernel.h> 182#include <sys/systm.h> 183#include <sys/malloc.h> 184#include <sys/conf.h> 185#include <sys/disklabel.h> 186#include <sys/bus.h> 187#include <machine/resource.h> 188#include <machine/bus.h> 189#include <sys/rman.h> 190#include <sys/stat.h> 191 192#include <cam/cam.h> 193#include <cam/cam_ccb.h> 194#include <cam/cam_sim.h> 195#include <cam/cam_xpt_sim.h> 196#include <cam/cam_xpt_periph.h> 197 198#include <cam/scsi/scsi_all.h> 199#include <cam/scsi/scsi_message.h> 200 201#include <vm/vm.h> 202#include <vm/pmap.h> 203#include <machine/cputypes.h> 204#include <i386/include/vmparam.h> 205 206#include <pci/pcivar.h> 207#include <pci/pcireg.h> 208 209#define STATIC static 210#define INLINE 211 212#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0)) 213# undef STATIC 214# define STATIC 215# undef INLINE 216# define INLINE 217#endif 218#define IN 219#define OUT 220#define INOUT 221 222#define osdSwap4(x) ((u_long)ntohl((u_long)(x))) 223#define KVTOPHYS(x) vtophys(x) 224#include "dev/asr/dptalign.h" 225#include "dev/asr/i2oexec.h" 226#include "dev/asr/i2obscsi.h" 227#include "dev/asr/i2odpt.h" 228#include "dev/asr/i2oadptr.h" 229#include "opt_asr.h" 230 231#include "dev/asr/sys_info.h" 232 233/* Configuration 
Definitions */ 234 235#define SG_SIZE 58 /* Scatter Gather list Size */ 236#define MAX_TARGET_ID 126 /* Maximum Target ID supported */ 237#define MAX_LUN 255 /* Maximum LUN Supported */ 238#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */ 239#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */ 240#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */ 241#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */ 242#define MAX_MAP 4194304L /* Maximum mapping size of IOP */ 243 244/************************************************************************** 245** ASR Host Adapter structure - One Structure For Each Host Adapter That ** 246** Is Configured Into The System. The Structure Supplies Configuration ** 247** Information, Status Info, Queue Info And An Active CCB List Pointer. ** 248***************************************************************************/ 249 250/* I2O register set */ 251typedef struct { 252 U8 Address[0x30]; 253 volatile U32 Status; 254 volatile U32 Mask; 255# define Mask_InterruptsDisabled 0x08 256 U32 x[2]; 257 volatile U32 ToFIFO; /* In Bound FIFO */ 258 volatile U32 FromFIFO; /* Out Bound FIFO */ 259} i2oRegs_t; 260 261/* 262 * A MIX of performance and space considerations for TID lookups 263 */ 264typedef u_int16_t tid_t; 265 266typedef struct { 267 u_int32_t size; /* up to MAX_LUN */ 268 tid_t TID[1]; 269} lun2tid_t; 270 271typedef struct { 272 u_int32_t size; /* up to MAX_TARGET */ 273 lun2tid_t * LUN[1]; 274} target2lun_t; 275 276/* 277 * To ensure that we only allocate and use the worst case ccb here, lets 278 * make our own local ccb union. If asr_alloc_ccb is utilized for another 279 * ccb type, ensure that you add the additional structures into our local 280 * ccb union. To ensure strict type checking, we will utilize the local 281 * ccb definition wherever possible. 
282 */ 283union asr_ccb { 284 struct ccb_hdr ccb_h; /* For convenience */ 285 struct ccb_scsiio csio; 286 struct ccb_setasync csa; 287}; 288 289typedef struct Asr_softc { 290 u_int16_t ha_irq; 291 void * ha_Base; /* base port for each board */ 292 u_int8_t * volatile ha_blinkLED; 293 i2oRegs_t * ha_Virt; /* Base address of adapter */ 294 I2O_IOP_ENTRY ha_SystemTable; 295 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */ 296 struct cam_path * ha_path[MAX_CHANNEL+1]; 297 struct cam_sim * ha_sim[MAX_CHANNEL+1]; 298#if __FreeBSD_version >= 400000 299 struct resource * ha_mem_res; 300 struct resource * ha_irq_res; 301 void * ha_intr; 302#endif 303 u_int8_t ha_adapter_target[MAX_CHANNEL+1]; 304 PI2O_LCT ha_LCT; /* Complete list of devices */ 305# define le_type IdentityTag[0] 306# define I2O_BSA 0x20 307# define I2O_FCA 0x40 308# define I2O_SCSI 0x00 309# define I2O_PORT 0x80 310# define I2O_UNKNOWN 0x7F 311# define le_bus IdentityTag[1] 312# define le_target IdentityTag[2] 313# define le_lun IdentityTag[3] 314 target2lun_t * ha_targets[MAX_CHANNEL+1]; 315 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs; 316 u_long ha_Msgs_Phys; 317 u_int16_t ha_Msgs_Count; 318 319 /* Configuration information */ 320 /* The target id maximums we take */ 321 u_int8_t ha_MaxBus; /* Maximum bus */ 322 u_int8_t ha_MaxId; /* Maximum target ID */ 323 u_int8_t ha_MaxLun; /* Maximum target LUN */ 324 u_int8_t ha_SgSize; /* Max SG elements */ 325 u_int8_t ha_pciBusNum; 326 u_int8_t ha_pciDeviceNum; 327 u_int16_t ha_QueueSize; /* Max outstanding commands */ 328 329 /* Links into other parents and HBAs */ 330 struct Asr_softc * ha_next; /* HBA list */ 331 332#ifdef ASR_MEASURE_PERFORMANCE 333#define MAX_TIMEQ_SIZE 256 // assumes MAX 256 scsi commands sent 334 asr_perf_t ha_performance; 335 u_int32_t ha_submitted_ccbs_count; 336 337 // Queueing macros for a circular queue 338#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail)) 339#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % 
MAX_TIMEQ_SIZE) == (head)) 340#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \ 341 if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \ 342 if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \ 343 (head) = (tail) = 0; \ 344 } \ 345 else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \ 346 Q[(tail)] = (item); \ 347 } \ 348 else { \ 349 debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \ 350 } 351#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \ 352 if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \ 353 item = Q[(head)]; \ 354 if ((head) == (tail)) { (head) = (tail) = -1; } \ 355 else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \ 356 } \ 357 else { \ 358 (item) = -1; \ 359 debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \ 360 } 361 362 // Circular queue of time stamps 363 struct timeval ha_timeQ[MAX_TIMEQ_SIZE]; 364 u_int32_t ha_timeQFreeList[MAX_TIMEQ_SIZE]; 365 int ha_timeQFreeHead; 366 int ha_timeQFreeTail; 367#endif 368} Asr_softc_t; 369 370STATIC Asr_softc_t * Asr_softc; 371 372/* 373 * Prototypes of the routines we have in this object. 
374 */ 375 376/* Externally callable routines */ 377#if __FreeBSD_version >= 400000 378#define PROBE_ARGS IN device_t tag 379#define PROBE_RET int 380#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag) 381#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);} 382#define ATTACH_ARGS IN device_t tag 383#define ATTACH_RET int 384#define ATTACH_SET() int unit = device_get_unit(tag) 385#define ATTACH_RETURN(retval) return(retval) 386#else 387#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id 388#define PROBE_RET const char * 389#define PROBE_SET() 390#define PROBE_RETURN(retval) return(retval) 391#define ATTACH_ARGS IN pcici_t tag, IN int unit 392#define ATTACH_RET void 393#define ATTACH_SET() 394#define ATTACH_RETURN(retval) return 395#endif 396/* I2O HDM interface */ 397STATIC PROBE_RET asr_probe __P((PROBE_ARGS)); 398STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS)); 399/* DOMINO placeholder */ 400STATIC PROBE_RET domino_probe __P((PROBE_ARGS)); 401STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS)); 402/* MODE0 adapter placeholder */ 403STATIC PROBE_RET mode0_probe __P((PROBE_ARGS)); 404STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS)); 405 406STATIC Asr_softc_t * ASR_get_sc __P(( 407 IN dev_t dev)); 408STATIC int asr_ioctl __P(( 409 IN dev_t dev, 410 IN u_long cmd, 411 INOUT caddr_t data, 412 int flag, 413 struct proc * proc)); 414STATIC int asr_open __P(( 415 IN dev_t dev, 416 int32_t flags, 417 int32_t ifmt, 418 IN struct proc * proc)); 419STATIC int asr_close __P(( 420 dev_t dev, 421 int flags, 422 int ifmt, 423 struct proc * proc)); 424STATIC int asr_intr __P(( 425 IN Asr_softc_t * sc)); 426STATIC void asr_timeout __P(( 427 INOUT void * arg)); 428STATIC int ASR_init __P(( 429 IN Asr_softc_t * sc)); 430STATIC INLINE int ASR_acquireLct __P(( 431 INOUT Asr_softc_t * sc)); 432STATIC INLINE int ASR_acquireHrt __P(( 433 INOUT Asr_softc_t * sc)); 434STATIC void asr_action __P(( 435 IN struct cam_sim * 
sim, 436 IN union ccb * ccb)); 437STATIC void asr_async __P(( 438 void * callback_arg, 439 u_int32_t code, 440 struct cam_path * path, 441 void * arg)); 442STATIC void asr_poll __P(( 443 IN struct cam_sim * sim)); 444 445/* 446 * Here is the auto-probe structure used to nest our tests appropriately 447 * during the startup phase of the operating system. 448 */ 449#if __FreeBSD_version >= 400000 450STATIC device_method_t asr_methods[] = { 451 DEVMETHOD(device_probe, asr_probe), 452 DEVMETHOD(device_attach, asr_attach), 453 { 0, 0 } 454}; 455 456STATIC driver_t asr_driver = { 457 "asr", 458 asr_methods, 459 sizeof(Asr_softc_t) 460}; 461 462STATIC devclass_t asr_devclass; 463 464DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0); 465 466STATIC device_method_t domino_methods[] = { 467 DEVMETHOD(device_probe, domino_probe), 468 DEVMETHOD(device_attach, domino_attach), 469 { 0, 0 } 470}; 471 472STATIC driver_t domino_driver = { 473 "domino", 474 domino_methods, 475 0 476}; 477 478STATIC devclass_t domino_devclass; 479 480DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0); 481 482STATIC device_method_t mode0_methods[] = { 483 DEVMETHOD(device_probe, mode0_probe), 484 DEVMETHOD(device_attach, mode0_attach), 485 { 0, 0 } 486}; 487 488STATIC driver_t mode0_driver = { 489 "mode0", 490 mode0_methods, 491 0 492}; 493 494STATIC devclass_t mode0_devclass; 495 496DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0); 497#else 498STATIC u_long asr_pcicount = 0; 499STATIC struct pci_device asr_pcidev = { 500 "asr", 501 asr_probe, 502 asr_attach, 503 &asr_pcicount, 504 NULL 505}; 506DATA_SET (asr_pciset, asr_pcidev); 507 508STATIC u_long domino_pcicount = 0; 509STATIC struct pci_device domino_pcidev = { 510 "domino", 511 domino_probe, 512 domino_attach, 513 &domino_pcicount, 514 NULL 515}; 516DATA_SET (domino_pciset, domino_pcidev); 517 518STATIC u_long mode0_pcicount = 0; 519STATIC struct pci_device mode0_pcidev = { 520 "mode0", 521 mode0_probe, 522 
mode0_attach, 523 &mode0_pcicount, 524 NULL 525}; 526DATA_SET (mode0_pciset, mode0_pcidev); 527#endif 528 529/* 530 * devsw for asr hba driver 531 * 532 * only ioctl is used. the sd driver provides all other access. 533 */ 534#define CDEV_MAJOR 154 /* preferred default character major */ 535STATIC struct cdevsw asr_cdevsw = { 536 asr_open, /* open */ 537 asr_close, /* close */ 538 noread, /* read */ 539 nowrite, /* write */ 540 asr_ioctl, /* ioctl */ 541 nopoll, /* poll */ 542 nommap, /* mmap */ 543 nostrategy, /* strategy */ 544 "asr", /* name */ 545 CDEV_MAJOR, /* maj */ 546 nodump, /* dump */ 547 nopsize, /* psize */ 548 0, /* flags */ 549 -1 /* bmaj */ 550}; 551 552#ifdef ASR_MEASURE_PERFORMANCE 553STATIC u_int32_t asr_time_delta __P((IN struct timeval start, 554 IN struct timeval end)); 555#endif 556 557/* 558 * Initialize the dynamic cdevsw hooks. 559 */ 560STATIC void 561asr_drvinit ( 562 void * unused) 563{ 564 static int asr_devsw_installed = 0; 565 566 if (asr_devsw_installed) { 567 return; 568 } 569 asr_devsw_installed++; 570 /* 571 * Find a free spot (the report during driver load used by 572 * osd layer in engine to generate the controlling nodes). 573 */ 574 while ((asr_cdevsw.d_maj < NUMCDEVSW) 575 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL)) { 576 ++asr_cdevsw.d_maj; 577 } 578 if (asr_cdevsw.d_maj >= NUMCDEVSW) for ( 579 asr_cdevsw.d_maj = 0; 580 (asr_cdevsw.d_maj < CDEV_MAJOR) 581 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL); 582 ++asr_cdevsw.d_maj); 583 /* 584 * Come to papa 585 */ 586 cdevsw_add(&asr_cdevsw); 587 /* 588 * delete any nodes that would attach to the primary adapter, 589 * let the adapter scans add them. 
590 */ 591 destroy_dev(makedev(asr_cdevsw.d_maj,0)); 592} /* asr_drvinit */ 593 594/* Must initialize before CAM layer picks up our HBA driver */ 595SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL) 596 597/* I2O support routines */ 598#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)] 599#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME)) 600 601/* 602 * Fill message with default. 603 */ 604STATIC PI2O_MESSAGE_FRAME 605ASR_fillMessage ( 606 IN char * Message, 607 IN u_int16_t size) 608{ 609 OUT PI2O_MESSAGE_FRAME Message_Ptr; 610 611 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message); 612 bzero ((void *)Message_Ptr, size); 613 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11); 614 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 615 (size + sizeof(U32) - 1) >> 2); 616 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 617 return (Message_Ptr); 618} /* ASR_fillMessage */ 619 620#define EMPTY_QUEUE ((U32)-1L) 621 622STATIC INLINE U32 623ASR_getMessage( 624 IN i2oRegs_t * virt) 625{ 626 OUT U32 MessageOffset; 627 628 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) { 629 MessageOffset = virt->ToFIFO; 630 } 631 return (MessageOffset); 632} /* ASR_getMessage */ 633 634/* Issue a polled command */ 635STATIC U32 636ASR_initiateCp ( 637 INOUT i2oRegs_t * virt, 638 IN PI2O_MESSAGE_FRAME Message) 639{ 640 OUT U32 Mask = -1L; 641 U32 MessageOffset; 642 u_int Delay = 1500; 643 644 /* 645 * ASR_initiateCp is only used for synchronous commands and will 646 * be made more resiliant to adapter delays since commands like 647 * resetIOP can cause the adapter to be deaf for a little time. 
648 */ 649 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE) 650 && (--Delay != 0)) { 651 DELAY (10000); 652 } 653 if (MessageOffset != EMPTY_QUEUE) { 654 bcopy (Message, virt->Address + MessageOffset, 655 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2); 656 /* 657 * Disable the Interrupts 658 */ 659 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled; 660 virt->ToFIFO = MessageOffset; 661 } 662 return (Mask); 663} /* ASR_initiateCp */ 664 665/* 666 * Reset the adapter. 667 */ 668STATIC U32 669ASR_resetIOP ( 670 INOUT i2oRegs_t * virt) 671{ 672 struct resetMessage { 673 I2O_EXEC_IOP_RESET_MESSAGE M; 674 U32 R; 675 }; 676 defAlignLong(struct resetMessage,Message); 677 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr; 678 OUT U32 * volatile Reply_Ptr; 679 U32 Old; 680 681 /* 682 * Build up our copy of the Message. 683 */ 684 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message, 685 sizeof(I2O_EXEC_IOP_RESET_MESSAGE)); 686 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET); 687 /* 688 * Reset the Reply Status 689 */ 690 *(Reply_Ptr = (U32 *)((char *)Message_Ptr 691 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0; 692 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr, 693 KVTOPHYS((void *)Reply_Ptr)); 694 /* 695 * Send the Message out 696 */ 697 if ((Old = ASR_initiateCp (virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) { 698 /* 699 * Wait for a response (Poll), timeouts are dangerous if 700 * the card is truly responsive. We assume response in 2s. 701 */ 702 u_int8_t Delay = 200; 703 704 while ((*Reply_Ptr == 0) && (--Delay != 0)) { 705 DELAY (10000); 706 } 707 /* 708 * Re-enable the interrupts. 
709 */ 710 virt->Mask = Old; 711 ASSERT (*Reply_Ptr); 712 return (*Reply_Ptr); 713 } 714 ASSERT (Old != (U32)-1L); 715 return (0); 716} /* ASR_resetIOP */ 717 718/* 719 * Get the curent state of the adapter 720 */ 721STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY 722ASR_getStatus ( 723 INOUT i2oRegs_t * virt, 724 OUT PI2O_EXEC_STATUS_GET_REPLY buffer) 725{ 726 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message); 727 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr; 728 U32 Old; 729 730 /* 731 * Build up our copy of the Message. 732 */ 733 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message, 734 sizeof(I2O_EXEC_STATUS_GET_MESSAGE)); 735 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr, 736 I2O_EXEC_STATUS_GET); 737 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr, 738 KVTOPHYS((void *)buffer)); 739 /* This one is a Byte Count */ 740 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr, 741 sizeof(I2O_EXEC_STATUS_GET_REPLY)); 742 /* 743 * Reset the Reply Status 744 */ 745 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY)); 746 /* 747 * Send the Message out 748 */ 749 if ((Old = ASR_initiateCp (virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) { 750 /* 751 * Wait for a response (Poll), timeouts are dangerous if 752 * the card is truly responsive. We assume response in 50ms. 753 */ 754 u_int8_t Delay = 50; 755 756 while (*((U8 * volatile)&buffer->SyncByte) == 0) { 757 if (--Delay == 0) { 758 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL; 759 break; 760 } 761 DELAY (1000); 762 } 763 /* 764 * Re-enable the interrupts. 765 */ 766 virt->Mask = Old; 767 return (buffer); 768 } 769 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL); 770} /* ASR_getStatus */ 771 772/* 773 * Check if the device is a SCSI I2O HBA, and add it to the list. 774 */ 775 776/* 777 * Probe for ASR controller. If we find it, we will use it. 778 * virtual adapters. 
779 */ 780STATIC PROBE_RET 781asr_probe(PROBE_ARGS) 782{ 783 PROBE_SET(); 784 if (id == 0xA5011044) { 785 PROBE_RETURN ("Adaptec Caching SCSI RAID"); 786 } 787 PROBE_RETURN (NULL); 788} /* asr_probe */ 789 790/* 791 * Probe/Attach for DOMINO chipset. 792 */ 793STATIC PROBE_RET 794domino_probe(PROBE_ARGS) 795{ 796 PROBE_SET(); 797 if (id == 0x10121044) { 798 PROBE_RETURN ("Adaptec Caching Memory Controller"); 799 } 800 PROBE_RETURN (NULL); 801} /* domino_probe */ 802 803STATIC ATTACH_RET 804domino_attach (ATTACH_ARGS) 805{ 806 ATTACH_RETURN (0); 807} /* domino_attach */ 808 809/* 810 * Probe/Attach for MODE0 adapters. 811 */ 812STATIC PROBE_RET 813mode0_probe(PROBE_ARGS) 814{ 815 PROBE_SET(); 816 if (id == 0x908010B5) { 817 PROBE_RETURN ("Adaptec Mode0 3xxx"); 818 } 819#if 0 /* this would match any generic i960 -- mjs */ 820 if (id == 0x19608086) { 821 PROBE_RETURN ("Adaptec Mode0 1xxx"); 822 } 823#endif 824 PROBE_RETURN (NULL); 825} /* mode0_probe */ 826 827STATIC ATTACH_RET 828mode0_attach (ATTACH_ARGS) 829{ 830 ATTACH_RETURN (0); 831} /* mode0_attach */ 832 833STATIC INLINE union asr_ccb * 834asr_alloc_ccb ( 835 IN Asr_softc_t * sc) 836{ 837 OUT union asr_ccb * new_ccb; 838 839 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb), 840 M_DEVBUF, M_WAITOK | M_ZERO)) != (union asr_ccb *)NULL) { 841 new_ccb->ccb_h.pinfo.priority = 1; 842 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; 843 new_ccb->ccb_h.spriv_ptr0 = sc; 844 } 845 return (new_ccb); 846} /* asr_alloc_ccb */ 847 848STATIC INLINE void 849asr_free_ccb ( 850 IN union asr_ccb * free_ccb) 851{ 852 free(free_ccb, M_DEVBUF); 853} /* asr_free_ccb */ 854 855/* 856 * Print inquiry data `carefully' 857 */ 858STATIC void 859ASR_prstring ( 860 u_int8_t * s, 861 int len) 862{ 863 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) { 864 printf ("%c", *(s++)); 865 } 866} /* ASR_prstring */ 867 868/* 869 * Prototypes 870 */ 871STATIC INLINE int ASR_queue __P(( 872 IN Asr_softc_t * sc, 873 IN 
PI2O_MESSAGE_FRAME Message)); 874/* 875 * Send a message synchronously and without Interrupt to a ccb. 876 */ 877STATIC int 878ASR_queue_s ( 879 INOUT union asr_ccb * ccb, 880 IN PI2O_MESSAGE_FRAME Message) 881{ 882 int s; 883 U32 Mask; 884 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 885 886 /* 887 * We do not need any (optional byteswapping) method access to 888 * the Initiator context field. 889 */ 890 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb); 891 892 /* Prevent interrupt service */ 893 s = splcam (); 894 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask) 895 | Mask_InterruptsDisabled; 896 897 if (ASR_queue (sc, Message) == EMPTY_QUEUE) { 898 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 899 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 900 } 901 902 /* 903 * Wait for this board to report a finished instruction. 904 */ 905 while (ccb->ccb_h.status == CAM_REQ_INPROG) { 906 (void)asr_intr (sc); 907 } 908 909 /* Re-enable Interrupts */ 910 sc->ha_Virt->Mask = Mask; 911 splx(s); 912 913 return (ccb->ccb_h.status); 914} /* ASR_queue_s */ 915 916/* 917 * Send a message synchronously to a Asr_softc_t 918 */ 919STATIC int 920ASR_queue_c ( 921 IN Asr_softc_t * sc, 922 IN PI2O_MESSAGE_FRAME Message) 923{ 924 union asr_ccb * ccb; 925 OUT int status; 926 927 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) { 928 return (CAM_REQUEUE_REQ); 929 } 930 931 status = ASR_queue_s (ccb, Message); 932 933 asr_free_ccb(ccb); 934 935 return (status); 936} /* ASR_queue_c */ 937 938/* 939 * Add the specified ccb to the active queue 940 */ 941STATIC INLINE void 942ASR_ccbAdd ( 943 IN Asr_softc_t * sc, 944 INOUT union asr_ccb * ccb) 945{ 946 int s; 947 948 s = splcam(); 949 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le); 950 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 951 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) { 952 /* 953 * RAID systems can take considerable time to 954 * complete some commands given the large cache 955 * flashes switching from 
write back to write thru. 956 */ 957 ccb->ccb_h.timeout = 6 * 60 * 1000; 958 } 959 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb, 960 (ccb->ccb_h.timeout * hz) / 1000); 961 } 962 splx(s); 963} /* ASR_ccbAdd */ 964 965/* 966 * Remove the specified ccb from the active queue. 967 */ 968STATIC INLINE void 969ASR_ccbRemove ( 970 IN Asr_softc_t * sc, 971 INOUT union asr_ccb * ccb) 972{ 973 int s; 974 975 s = splcam(); 976 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); 977 LIST_REMOVE(&ccb->ccb_h, sim_links.le); 978 splx(s); 979} /* ASR_ccbRemove */ 980 981/* 982 * Fail all the active commands, so they get re-issued by the operating 983 * system. 984 */ 985STATIC INLINE void 986ASR_failActiveCommands ( 987 IN Asr_softc_t * sc) 988{ 989 struct ccb_hdr * ccb; 990 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message); 991 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr; 992 int s; 993 994 /* Send a blind LCT command to wait for the enableSys to complete */ 995 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message, 996 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT)); 997 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 998 I2O_EXEC_LCT_NOTIFY); 999 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr, 1000 I2O_CLASS_MATCH_ANYCLASS); 1001 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1002 1003 s = splcam(); 1004 LIST_FOREACH(ccb, &(sc->ha_ccb), sim_links.le) { 1005 1006 ASR_ccbRemove (sc, (union asr_ccb *)ccb); 1007 1008 ccb->status &= ~CAM_STATUS_MASK; 1009 ccb->status |= CAM_REQUEUE_REQ; 1010 ((struct ccb_scsiio *)ccb)->resid 1011 = ((struct ccb_scsiio *)ccb)->dxfer_len; 1012 1013 if (ccb->path) { 1014 xpt_done ((union ccb *)ccb); 1015 } else { 1016 wakeup ((caddr_t)ccb); 1017 } 1018 } 1019 splx(s); 1020} /* ASR_failActiveCommands */ 1021 1022/* 1023 * The following command causes the HBA to reset the specific bus 1024 */ 1025STATIC INLINE void 1026ASR_resetBus( 1027 IN Asr_softc_t * sc, 1028 IN int 
bus) 1029{ 1030 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message); 1031 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr; 1032 PI2O_LCT_ENTRY Device; 1033 1034 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message, 1035 sizeof(I2O_HBA_BUS_RESET_MESSAGE)); 1036 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame, 1037 I2O_HBA_BUS_RESET); 1038 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 1039 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 1040 ++Device) { 1041 if (((Device->le_type & I2O_PORT) != 0) 1042 && (Device->le_bus == bus)) { 1043 I2O_MESSAGE_FRAME_setTargetAddress( 1044 &Message_Ptr->StdMessageFrame, 1045 I2O_LCT_ENTRY_getLocalTID(Device)); 1046 /* Asynchronous command, with no expectations */ 1047 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1048 break; 1049 } 1050 } 1051} /* ASR_resetBus */ 1052 1053STATIC INLINE int 1054ASR_getBlinkLedCode ( 1055 IN Asr_softc_t * sc) 1056{ 1057 if ((sc != (Asr_softc_t *)NULL) 1058 && (sc->ha_blinkLED != (u_int8_t *)NULL) 1059 && (sc->ha_blinkLED[1] == 0xBC)) { 1060 return (sc->ha_blinkLED[0]); 1061 } 1062 return (0); 1063} /* ASR_getBlinkCode */ 1064 1065/* 1066 * Determine the address of an TID lookup. Must be done at high priority 1067 * since the address can be changed by other threads of execution. 1068 * 1069 * Returns NULL pointer if not indexible (but will attempt to generate 1070 * an index if `new_entry' flag is set to TRUE). 1071 * 1072 * All addressible entries are to be guaranteed zero if never initialized. 1073 */ 1074STATIC INLINE tid_t * 1075ASR_getTidAddress( 1076 INOUT Asr_softc_t * sc, 1077 IN int bus, 1078 IN int target, 1079 IN int lun, 1080 IN int new_entry) 1081{ 1082 target2lun_t * bus_ptr; 1083 lun2tid_t * target_ptr; 1084 unsigned new_size; 1085 1086 /* 1087 * Validity checking of incoming parameters. More of a bound 1088 * expansion limit than an issue with the code dealing with the 1089 * values. 
	 *
	 *	sc must be valid before it gets here, so that check could be
	 *	dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		/*
		 * NOTE(review): with M_WAITOK, malloc(9) does not return
		 * NULL, so the NULL arm below should be unreachable —
		 * kept for safety; confirm against malloc(9).
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 *	and not really performance critical at this point.
		 */
		/* size - 1 compensates for the one LUN slot built into
		 * the structure itself */
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 *	and not really performance critical at this point.
		 */
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 *	Get a pre-existing TID relationship.
 *
 *	If the TID was never set, return (tid_t)-1.
 *
 *	should use mutex rather than spl.
 */
STATIC INLINE tid_t
ASR_getTid (
	IN Asr_softc_t	  * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun)
{
	tid_t	* tid_ptr;
	int	  s;
	OUT tid_t retval;

	s = splcam();
	/* FALSE: pure lookup — do not grow the cache on a miss */
	if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
	  == (tid_t *)NULL)
	/* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		splx(s);
		return ((tid_t)-1);
	}
	/* Copy the value before dropping spl; the slot may be rewritten */
	retval = *tid_ptr;
	splx(s);
	return (retval);
} /* ASR_getTid */

/*
 *	Set a TID relationship.
 *
 *	If the TID was not set, return (tid_t)-1.
 *
 *	should use mutex rather than spl.
1250 */ 1251STATIC INLINE tid_t 1252ASR_setTid ( 1253 INOUT Asr_softc_t * sc, 1254 IN int bus, 1255 IN int target, 1256 IN int lun, 1257 INOUT tid_t TID) 1258{ 1259 tid_t * tid_ptr; 1260 int s; 1261 1262 if (TID != (tid_t)-1) { 1263 if (TID == 0) { 1264 return ((tid_t)-1); 1265 } 1266 s = splcam(); 1267 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE)) 1268 == (tid_t *)NULL) { 1269 splx(s); 1270 return ((tid_t)-1); 1271 } 1272 *tid_ptr = TID; 1273 splx(s); 1274 } 1275 return (TID); 1276} /* ASR_setTid */ 1277 1278/*-------------------------------------------------------------------------*/ 1279/* Function ASR_rescan */ 1280/*-------------------------------------------------------------------------*/ 1281/* The Parameters Passed To This Function Are : */ 1282/* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 1283/* */ 1284/* This Function Will rescan the adapter and resynchronize any data */ 1285/* */ 1286/* Return : 0 For OK, Error Code Otherwise */ 1287/*-------------------------------------------------------------------------*/ 1288 1289STATIC INLINE int 1290ASR_rescan( 1291 IN Asr_softc_t * sc) 1292{ 1293 int bus; 1294 OUT int error; 1295 1296 /* 1297 * Re-acquire the LCT table and synchronize us to the adapter. 1298 */ 1299 if ((error = ASR_acquireLct(sc)) == 0) { 1300 error = ASR_acquireHrt(sc); 1301 } 1302 1303 if (error != 0) { 1304 return error; 1305 } 1306 1307 bus = sc->ha_MaxBus; 1308 /* Reset all existing cached TID lookups */ 1309 do { 1310 int target; 1311 1312 /* 1313 * Scan for all targets on this bus to see if they 1314 * got affected by the rescan. 1315 */ 1316 for (target = 0; target <= sc->ha_MaxId; ++target) { 1317 int lun; 1318 1319 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 1320 PI2O_LCT_ENTRY Device; 1321 tid_t TID = (tid_t)-1; 1322 1323 /* 1324 * See if the cached TID changed. Search for 1325 * the device in our new LCT. 
1326 */ 1327 for (Device = sc->ha_LCT->LCTEntry; 1328 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT) 1329 + I2O_LCT_getTableSize(sc->ha_LCT)); 1330 ++Device) { 1331 if ((Device->le_type != I2O_UNKNOWN) 1332 && (Device->le_bus == bus) 1333 && (Device->le_target == target) 1334 && (Device->le_lun == lun) 1335 && (I2O_LCT_ENTRY_getUserTID(Device) 1336 == 0xFFF)) { 1337 TID = I2O_LCT_ENTRY_getLocalTID( 1338 Device); 1339 break; 1340 } 1341 } 1342 /* 1343 * Indicate to the OS that the label needs 1344 * to be recalculated, or that the specific 1345 * open device is no longer valid (Merde) 1346 * because the cached TID changed. 1347 * ASR_getTid (sc, bus, target, lun) != TI 1348 */ 1349 /* 1350 * We have the option of clearing the 1351 * cached TID for it to be rescanned, or to 1352 * set it now even if the device never got 1353 * accessed. We chose the later since we 1354 * currently do not use the condition that 1355 * the TID ever got cached. 1356 */ 1357 ASR_setTid (sc, bus, target, lun, TID); 1358 } 1359 } 1360 } while (--bus >= 0); 1361 return (error); 1362} /* ASR_rescan */ 1363 1364/*-------------------------------------------------------------------------*/ 1365/* Function ASR_reset */ 1366/*-------------------------------------------------------------------------*/ 1367/* The Parameters Passed To This Function Are : */ 1368/* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 1369/* */ 1370/* This Function Will reset the adapter and resynchronize any data */ 1371/* */ 1372/* Return : None */ 1373/*-------------------------------------------------------------------------*/ 1374 1375STATIC INLINE void 1376ASR_reset( 1377 IN Asr_softc_t * sc) 1378{ 1379 (void)ASR_resetIOP (sc->ha_Virt); 1380 (void)ASR_init (sc); 1381 (void)ASR_rescan (sc); 1382 (void)ASR_failActiveCommands (sc); 1383} /* ASR_reset */ 1384 1385/* 1386 * Device timeout handler. 
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb	* ccb = (union asr_ccb *)arg;
	Asr_softc_t	* sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		  s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	/* Here `s' temporarily holds the BlinkLED code, not an spl cookie */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		debug_asr_printf (
		  " due to adapter blinkled code %x\nresetting adapter\n", s);
		ASR_reset (sc);
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 *	the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 *	our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Second timeout on the same ccb: escalate to a full adapter reset */
	if (ccb->ccb_h.status == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		ASR_reset (sc);
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	/* Re-arm the watchdog so the escalation branch above can fire */
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	splx(s);
} /* asr_timeout */

/*
 * send a message asynchronously
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t	  * sc,
	IN PI2O_MESSAGE_FRAME	Message)
{
	OUT U32		  MessageOffset;
	union asr_ccb	* ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/*
	 *	Limit the number of Messages sent to this HBA. Better to sleep,
	 *	than to hardware loop like a nut! By limiting the number of
	 *	messages to an individual HBA here, we manage to perform all
	 *	the processing of the message ready to drop the next one into
	 *	the controller.
 We could limit the messages we are allowed to
 *	take, but that may have a performance hit.
 */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	/* Try twice to obtain an inbound frame before giving up */
	if (((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE)
	 || ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE)) {
#ifdef ASR_MEASURE_PERFORMANCE
		int	startTimeIndex;

		if (ccb) {
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&sc->ha_timeQ[startTimeIndex]);
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Copy the request into the adapter's inbound frame */
		bcopy (Message, sc->ha_Virt->Address + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/* Track the ccb (and arm its watchdog) before posting */
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/* Adapter is signalling a firmware fault; recover */
			ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */


/* Simple Scatter Gather elements */
/* NOTE(review): macro arguments Buffer and Size are evaluated more than
 * once — do not pass expressions with side effects. */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))

/*
 *	Retrieve Parameter Group.
 *		Buffer must be allocated using defAlignLong macro.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t	  * sc,
	IN tid_t	    TID,
	IN int		    Group,
	OUT void	  * Buffer,
	IN unsigned	    BufferSize)
{
	/* Request frame plus room for two simple SG elements and the
	 * operations list that rides behind the frame */
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations		* Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	* Message_Ptr;
	/* Layout of the reply written by the adapter into Buffer */
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	Header;
		I2O_PARAM_READ_OPERATION_RESULT	Read;
		char				Info[1];
	}				* Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation fetching every field of the group */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *	Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Returns a pointer INTO the caller's Buffer, or NULL on failure */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */

/*
 *	Acquire the LCT information.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t	  * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 *	to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 *	is greater by using a shift operation rather than a divide by
	 *	sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/* First pass: probe with a header-sized buffer; the adapter will
	 * report the real table size back in Table */
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 *	page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 *	is greater by using a shift operation rather than a divide by
	 *	sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 *	Incrementing requires resizing of the packet.
		 */
		/* Grow the message frame to hold one more SG element;
		 * the frame is copied into the new allocation and `sg'
		 * is re-based onto it */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		/* Second pass: fetch the full LCT into sc->ha_LCT */
		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify every LCT entry and fill in the bus/target/lun
	 * addressing information used by the TID cache */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER	Header;
					I2O_PARAM_READ_OPERATION_RESULT	Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
				};
				defAlignLong(struct ControllerInfo, Buffer);
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				  ASR_getParams(sc,
				    I2O_LCT_ENTRY_getLocalTID(Entry),
				    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				    Buffer, sizeof(struct ControllerInfo)))
				 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
					continue;
				}
				Entry->le_target
				  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				    Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
			/* NOTE(review): port entries deliberately fall into
			 * default and `continue' — the DeviceInfo query below
			 * applies only to the BSA/SCSI/FCA peripheral cases
			 * that `break' out of the switch. */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT	Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */

/*
 *	Initialize a message frame.
 *	We assume that the CDB has already been set up, so all we do here is
 *	generate the Scatter Gather list.
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb      * ccb,
	OUT PI2O_MESSAGE_FRAME	Message)
{
	int			next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME	Message_Ptr;
	Asr_softc_t	      * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t			v;
	vm_size_t		size, len;
	U32			MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		/* TID cache miss: fall back to a linear LCT search and
		 * cache the answer for the next command */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		/* No such device known to the adapter */
		if (TID == (tid_t)0) {
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 *	We do not need any (optional byteswapping) method access to
	 *	the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 *	copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 *	Given a buffer describing a transfer, set up a scatter/gather map
	 *	in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	/* Re-set the SCB flags now that the transfer direction is known */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw)
	      ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		| I2O_SCB_FLAG_ENABLE_DISCONNECT
		| I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		| I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
	      : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		| I2O_SCB_FLAG_ENABLE_DISCONNECT
		| I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		| I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	      | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	      | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 *	Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* Emit one simple SG element per physically-contiguous run,
	 * up to the frame's SG_SIZE element capacity */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ...
 */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	/* Trailing SG element always points at the sense buffer */
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 *	Reset the adapter.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t	* sc)
{
	/* Request frame followed by one U32 of reply status space */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE	M;
		U32				R;
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	OUT U32		     * volatile Reply_Ptr;
	U32				Old;

	/*
	 *	Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *	Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 */
		/* NOTE(review): unbounded busy-wait on the DMA'd status
		 * word; a wedged adapter hangs here — consider a timeout. */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 *	initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			    contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			      0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		/* Hand every reply frame's physical address to the adapter */
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */

/*
 *	Set the system table
 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t	    * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE	Message_Ptr;
	PI2O_SET_SYSTAB_HEADER		SystemTable;
	Asr_softc_t		      * ha;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	/* One system-table entry per attached adapter instance */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* +3: header SG element plus the two NULL private-memory/IO SGs */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 *	to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 *	is greater by using a shift operation rather than a divide by
	 *	sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Empty private memory and private I/O descriptors */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	  | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */

/*
 *	Fetch the Hardware Resource Table and record each adapter port's
 *	bus number into the matching LCT entries.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t	* sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}			   Hrt;
	u_int8_t		   NumberOfEntries;
	PI2O_HRT_ENTRY		   Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *	Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries we reserved room for */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* AdapterID: low 12 bits TID, bus number in bits 16+ */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */

/*
 *	Enable the adapter.
2206 */ 2207STATIC INLINE int 2208ASR_enableSys ( 2209 IN Asr_softc_t * sc) 2210{ 2211 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message); 2212 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr; 2213 2214 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message, 2215 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE)); 2216 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 2217 I2O_EXEC_SYS_ENABLE); 2218 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0); 2219} /* ASR_enableSys */ 2220 2221/* 2222 * Perform the stages necessary to initialize the adapter 2223 */ 2224STATIC int 2225ASR_init( 2226 IN Asr_softc_t * sc) 2227{ 2228 return ((ASR_initOutBound(sc) == 0) 2229 || (ASR_setSysTab(sc) != CAM_REQ_CMP) 2230 || (ASR_enableSys(sc) != CAM_REQ_CMP)); 2231} /* ASR_init */ 2232 2233/* 2234 * Send a Synchronize Cache command to the target device. 2235 */ 2236STATIC INLINE void 2237ASR_sync ( 2238 IN Asr_softc_t * sc, 2239 IN int bus, 2240 IN int target, 2241 IN int lun) 2242{ 2243 tid_t TID; 2244 2245 /* 2246 * We will not synchronize the device when there are outstanding 2247 * commands issued by the OS (this is due to a locked up device, 2248 * as the OS normally would flush all outstanding commands before 2249 * issuing a shutdown or an adapter reset). 
2250 */ 2251 if ((sc != (Asr_softc_t *)NULL) 2252 && (LIST_FIRST(&sc->ha_ccb) != (struct ccb_hdr *)NULL) 2253 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2254 && (TID != (tid_t)0)) { 2255 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2256 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2257 2258 bzero (Message_Ptr 2259 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2260 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2261 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2262 2263 I2O_MESSAGE_FRAME_setVersionOffset( 2264 (PI2O_MESSAGE_FRAME)Message_Ptr, 2265 I2O_VERSION_11 2266 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2267 - sizeof(I2O_SG_ELEMENT)) 2268 / sizeof(U32)) << 4)); 2269 I2O_MESSAGE_FRAME_setMessageSize( 2270 (PI2O_MESSAGE_FRAME)Message_Ptr, 2271 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2272 - sizeof(I2O_SG_ELEMENT)) 2273 / sizeof(U32)); 2274 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2275 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2276 I2O_MESSAGE_FRAME_setFunction( 2277 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2278 I2O_MESSAGE_FRAME_setTargetAddress( 2279 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2280 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2281 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2282 I2O_SCSI_SCB_EXEC); 2283 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2284 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2285 I2O_SCB_FLAG_ENABLE_DISCONNECT 2286 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2287 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2288 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2289 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2290 DPT_ORGANIZATION_ID); 2291 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2292 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2293 Message_Ptr->CDB[1] = (lun << 5); 2294 2295 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2296 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2297 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2298 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2299 
| I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2300 2301 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2302 2303 } 2304} 2305 2306STATIC INLINE void 2307ASR_synchronize ( 2308 IN Asr_softc_t * sc) 2309{ 2310 int bus, target, lun; 2311 2312 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2313 for (target = 0; target <= sc->ha_MaxId; ++target) { 2314 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2315 ASR_sync(sc,bus,target,lun); 2316 } 2317 } 2318 } 2319} 2320 2321/* 2322 * Reset the HBA, targets and BUS. 2323 * Currently this resets *all* the SCSI busses. 2324 */ 2325STATIC INLINE void 2326asr_hbareset( 2327 IN Asr_softc_t * sc) 2328{ 2329 ASR_synchronize (sc); 2330 ASR_reset (sc); 2331} /* asr_hbareset */ 2332 2333/* 2334 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2335 * limit and a reduction in error checking (in the pre 4.0 case). 2336 */ 2337STATIC int 2338asr_pci_map_mem ( 2339#if __FreeBSD_version >= 400000 2340 IN device_t tag, 2341#else 2342 IN pcici_t tag, 2343#endif 2344 IN Asr_softc_t * sc) 2345{ 2346 int rid; 2347 u_int32_t p, l; 2348 2349#if __FreeBSD_version >= 400000 2350 /* 2351 * I2O specification says we must find first *memory* mapped BAR 2352 */ 2353 for (rid = PCIR_MAPS; 2354 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t)); 2355 ++rid) { 2356 p = pci_read_config(tag, rid, sizeof(p)); 2357 if ((p & 1) == 0) { 2358 break; 2359 } 2360 } 2361 /* 2362 * Give up? 
2363 */ 2364 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2365 rid = PCIR_MAPS; 2366 } 2367 p = pci_read_config(tag, rid, sizeof(p)); 2368 pci_write_config(tag, rid, -1, sizeof(p)); 2369 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2370 pci_write_config(tag, rid, p, sizeof(p)); 2371 if (l > MAX_MAP) { 2372 l = MAX_MAP; 2373 } 2374 p &= ~15; 2375 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2376 p, p + l, l, RF_ACTIVE); 2377 if (sc->ha_mem_res == (struct resource *)NULL) { 2378 return (0); 2379 } 2380 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res); 2381 if (sc->ha_Base == (void *)NULL) { 2382 return (0); 2383 } 2384 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res); 2385#else 2386 vm_size_t psize, poffs; 2387 2388 /* 2389 * I2O specification says we must find first *memory* mapped BAR 2390 */ 2391 for (rid = PCI_MAP_REG_START; 2392 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t)); 2393 ++rid) { 2394 p = pci_conf_read (tag, rid); 2395 if ((p & 1) == 0) { 2396 break; 2397 } 2398 } 2399 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) { 2400 rid = PCI_MAP_REG_START; 2401 } 2402 /* 2403 ** save old mapping, get size and type of memory 2404 ** 2405 ** type is in the lowest four bits. 2406 ** If device requires 2^n bytes, the next 2407 ** n-4 bits are read as 0. 2408 */ 2409 2410 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid)) 2411 & PCI_MAP_MEMORY_ADDRESS_MASK); 2412 pci_conf_write (tag, rid, 0xfffffffful); 2413 l = pci_conf_read (tag, rid); 2414 pci_conf_write (tag, rid, p); 2415 2416 /* 2417 ** check the type 2418 */ 2419 2420 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M 2421 && ((u_long)sc->ha_Base & ~0xfffff) == 0) 2422 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) { 2423 debug_asr_printf ( 2424 "asr_pci_map_mem failed: bad memory type=0x%x\n", 2425 (unsigned) l); 2426 return (0); 2427 }; 2428 2429 /* 2430 ** get the size. 
2431 */ 2432 2433 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK); 2434 if (psize > MAX_MAP) { 2435 psize = MAX_MAP; 2436 } 2437 2438 if ((sc->ha_Base == (void *)NULL) 2439 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) { 2440 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n"); 2441 return (0); 2442 }; 2443 2444 /* 2445 ** Truncate sc->ha_Base to page boundary. 2446 ** (Or does pmap_mapdev the job?) 2447 */ 2448 2449 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base); 2450 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs, 2451 psize + poffs); 2452 2453 if (sc->ha_Virt == (i2oRegs_t *)NULL) { 2454 return (0); 2455 } 2456 2457 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs); 2458#endif 2459 return (1); 2460} /* asr_pci_map_mem */ 2461 2462/* 2463 * A simplified copy of the real pci_map_int with additional 2464 * registration requirements. 2465 */ 2466STATIC int 2467asr_pci_map_int ( 2468#if __FreeBSD_version >= 400000 2469 IN device_t tag, 2470#else 2471 IN pcici_t tag, 2472#endif 2473 IN Asr_softc_t * sc) 2474{ 2475#if __FreeBSD_version >= 400000 2476 int rid = 0; 2477 2478 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid, 2479 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 2480 if (sc->ha_irq_res == (struct resource *)NULL) { 2481 return (0); 2482 } 2483 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM, 2484 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) { 2485 return (0); 2486 } 2487 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2488#else 2489 if (!pci_map_int(tag, (pci_inthand_t *)asr_intr, 2490 (void *)sc, &cam_imask)) { 2491 return (0); 2492 } 2493 sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE); 2494#endif 2495 return (1); 2496} /* asr_pci_map_int */ 2497 2498/* 2499 * Attach the devices, and virtual devices to the driver list. 
2500 */ 2501STATIC ATTACH_RET 2502asr_attach (ATTACH_ARGS) 2503{ 2504 Asr_softc_t * sc; 2505 struct scsi_inquiry_data * iq; 2506 ATTACH_SET(); 2507 2508 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == 2509 (Asr_softc_t *)NULL) 2510 { 2511 ATTACH_RETURN(ENOMEM); 2512 } 2513 if (Asr_softc == (Asr_softc_t *)NULL) { 2514 /* 2515 * Fixup the OS revision as saved in the dptsig for the 2516 * engine (dptioctl.h) to pick up. 2517 */ 2518 bcopy (osrelease, &ASR_sig.dsDescription[16], 5); 2519 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj); 2520 } 2521 /* 2522 * Initialize the software structure 2523 */ 2524 LIST_INIT(&sc->ha_ccb); 2525# ifdef ASR_MEASURE_PERFORMANCE 2526 { 2527 u_int32_t i; 2528 2529 // initialize free list for timeQ 2530 sc->ha_timeQFreeHead = 0; 2531 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1; 2532 for (i = 0; i < MAX_TIMEQ_SIZE; i++) { 2533 sc->ha_timeQFreeList[i] = i; 2534 } 2535 } 2536# endif 2537 /* Link us into the HA list */ 2538 { 2539 Asr_softc_t **ha; 2540 2541 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2542 *(ha) = sc; 2543 } 2544 { 2545 PI2O_EXEC_STATUS_GET_REPLY status; 2546 int size; 2547 2548 /* 2549 * This is the real McCoy! 
2550 */ 2551 if (!asr_pci_map_mem(tag, sc)) { 2552 printf ("asr%d: could not map memory\n", unit); 2553 ATTACH_RETURN(ENXIO); 2554 } 2555 /* Enable if not formerly enabled */ 2556#if __FreeBSD_version >= 400000 2557 pci_write_config (tag, PCIR_COMMAND, 2558 pci_read_config (tag, PCIR_COMMAND, sizeof(char)) 2559 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2560 /* Knowledge is power, responsibility is direct */ 2561 { 2562 struct pci_devinfo { 2563 STAILQ_ENTRY(pci_devinfo) pci_links; 2564 struct resource_list resources; 2565 pcicfgregs cfg; 2566 } * dinfo = device_get_ivars(tag); 2567 sc->ha_pciBusNum = dinfo->cfg.bus; 2568 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) 2569 | dinfo->cfg.func; 2570 } 2571#else 2572 pci_conf_write (tag, PCIR_COMMAND, 2573 pci_conf_read (tag, PCIR_COMMAND) 2574 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 2575 /* Knowledge is power, responsibility is direct */ 2576 switch (pci_mechanism) { 2577 2578 case 1: 2579 sc->ha_pciBusNum = tag.cfg1 >> 16; 2580 sc->ha_pciDeviceNum = tag.cfg1 >> 8; 2581 2582 case 2: 2583 sc->ha_pciBusNum = tag.cfg2.forward; 2584 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7) 2585 | (tag.cfg2.port >> 5); 2586 } 2587#endif 2588 /* Check if the device is there? 
*/ 2589 if ((ASR_resetIOP(sc->ha_Virt) == 0) 2590 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc ( 2591 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) 2592 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) 2593 || (ASR_getStatus(sc->ha_Virt, status) == NULL)) { 2594 printf ("asr%d: could not initialize hardware\n", unit); 2595 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */ 2596 } 2597 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2598 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2599 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2600 sc->ha_SystemTable.IopState = status->IopState; 2601 sc->ha_SystemTable.MessengerType = status->MessengerType; 2602 sc->ha_SystemTable.InboundMessageFrameSize 2603 = status->InboundMFrameSize; 2604 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow 2605 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO)); 2606 2607 if (!asr_pci_map_int(tag, (void *)sc)) { 2608 printf ("asr%d: could not map interrupt\n", unit); 2609 ATTACH_RETURN(ENXIO); 2610 } 2611 2612 /* Adjust the maximim inbound count */ 2613 if (((sc->ha_QueueSize 2614 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) 2615 > MAX_INBOUND) 2616 || (sc->ha_QueueSize == 0)) { 2617 sc->ha_QueueSize = MAX_INBOUND; 2618 } 2619 2620 /* Adjust the maximum outbound count */ 2621 if (((sc->ha_Msgs_Count 2622 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) 2623 > MAX_OUTBOUND) 2624 || (sc->ha_Msgs_Count == 0)) { 2625 sc->ha_Msgs_Count = MAX_OUTBOUND; 2626 } 2627 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2628 sc->ha_Msgs_Count = sc->ha_QueueSize; 2629 } 2630 2631 /* Adjust the maximum SG size to adapter */ 2632 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize( 2633 status) << 2)) > MAX_INBOUND_SIZE) { 2634 size = MAX_INBOUND_SIZE; 2635 } 2636 free (status, M_TEMP); 2637 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2638 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT); 2639 } 2640 2641 /* 
2642 * Only do a bus/HBA reset on the first time through. On this 2643 * first time through, we do not send a flush to the devices. 2644 */ 2645 if (ASR_init(sc) == 0) { 2646 struct BufferInfo { 2647 I2O_PARAM_RESULTS_LIST_HEADER Header; 2648 I2O_PARAM_READ_OPERATION_RESULT Read; 2649 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2650 }; 2651 defAlignLong (struct BufferInfo, Buffer); 2652 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2653# define FW_DEBUG_BLED_OFFSET 8 2654 2655 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2656 ASR_getParams(sc, 0, 2657 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2658 Buffer, sizeof(struct BufferInfo))) 2659 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) { 2660 sc->ha_blinkLED = sc->ha_Virt->Address 2661 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info) 2662 + FW_DEBUG_BLED_OFFSET; 2663 } 2664 if (ASR_acquireLct(sc) == 0) { 2665 (void)ASR_acquireHrt(sc); 2666 } 2667 } else { 2668 printf ("asr%d: failed to initialize\n", unit); 2669 ATTACH_RETURN(ENXIO); 2670 } 2671 /* 2672 * Add in additional probe responses for more channels. We 2673 * are reusing the variable `target' for a channel loop counter. 2674 * Done here because of we need both the acquireLct and 2675 * acquireHrt data. 
2676 */ 2677 { PI2O_LCT_ENTRY Device; 2678 2679 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2680 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2681 ++Device) { 2682 if (Device->le_type == I2O_UNKNOWN) { 2683 continue; 2684 } 2685 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2686 if (Device->le_target > sc->ha_MaxId) { 2687 sc->ha_MaxId = Device->le_target; 2688 } 2689 if (Device->le_lun > sc->ha_MaxLun) { 2690 sc->ha_MaxLun = Device->le_lun; 2691 } 2692 } 2693 if (((Device->le_type & I2O_PORT) != 0) 2694 && (Device->le_bus <= MAX_CHANNEL)) { 2695 /* Do not increase MaxId for efficiency */ 2696 sc->ha_adapter_target[Device->le_bus] 2697 = Device->le_target; 2698 } 2699 } 2700 } 2701 2702 2703 /* 2704 * Print the HBA model number as inquired from the card. 2705 */ 2706 2707 printf ("asr%d:", unit); 2708 2709 if ((iq = (struct scsi_inquiry_data *)malloc ( 2710 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) 2711 != (struct scsi_inquiry_data *)NULL) { 2712 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2713 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2714 int posted = 0; 2715 2716 bzero (Message_Ptr 2717 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2718 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2719 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2720 2721 I2O_MESSAGE_FRAME_setVersionOffset( 2722 (PI2O_MESSAGE_FRAME)Message_Ptr, 2723 I2O_VERSION_11 2724 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2725 - sizeof(I2O_SG_ELEMENT)) 2726 / sizeof(U32)) << 4)); 2727 I2O_MESSAGE_FRAME_setMessageSize( 2728 (PI2O_MESSAGE_FRAME)Message_Ptr, 2729 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2730 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) 2731 / sizeof(U32)); 2732 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2733 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2734 I2O_MESSAGE_FRAME_setFunction( 2735 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2736 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode 
( 2737 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2738 I2O_SCSI_SCB_EXEC); 2739 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2740 I2O_SCB_FLAG_ENABLE_DISCONNECT 2741 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2742 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2743 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2744 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2745 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2746 DPT_ORGANIZATION_ID); 2747 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2748 Message_Ptr->CDB[0] = INQUIRY; 2749 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data); 2750 if (Message_Ptr->CDB[4] == 0) { 2751 Message_Ptr->CDB[4] = 255; 2752 } 2753 2754 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2755 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2756 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2757 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2758 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2759 2760 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2761 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2762 sizeof(struct scsi_inquiry_data)); 2763 SG(&(Message_Ptr->SGL), 0, 2764 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2765 iq, sizeof(struct scsi_inquiry_data)); 2766 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2767 2768 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2769 printf (" "); 2770 ASR_prstring (iq->vendor, 8); 2771 ++posted; 2772 } 2773 if (iq->product[0] && (iq->product[0] != ' ')) { 2774 printf (" "); 2775 ASR_prstring (iq->product, 16); 2776 ++posted; 2777 } 2778 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2779 printf (" FW Rev. "); 2780 ASR_prstring (iq->revision, 4); 2781 ++posted; 2782 } 2783 free ((caddr_t)iq, M_TEMP); 2784 if (posted) { 2785 printf (","); 2786 } 2787 } 2788 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2789 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize); 2790 2791 /* 2792 * fill in the prototype cam_path. 
2793 */ 2794 { 2795 int bus; 2796 union asr_ccb * ccb; 2797 2798 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) { 2799 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit); 2800 ATTACH_RETURN(ENOMEM); 2801 } 2802 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2803 struct cam_devq * devq; 2804 int QueueSize = sc->ha_QueueSize; 2805 2806 if (QueueSize > MAX_INBOUND) { 2807 QueueSize = MAX_INBOUND; 2808 } 2809 2810 /* 2811 * Create the device queue for our SIM(s). 2812 */ 2813 if ((devq = cam_simq_alloc(QueueSize)) == NULL) { 2814 continue; 2815 } 2816 2817 /* 2818 * Construct our first channel SIM entry 2819 */ 2820 sc->ha_sim[bus] = cam_sim_alloc( 2821 asr_action, asr_poll, "asr", sc, 2822 unit, 1, QueueSize, devq); 2823 if (sc->ha_sim[bus] == NULL) { 2824 continue; 2825 } 2826 2827 if (xpt_bus_register(sc->ha_sim[bus], bus) 2828 != CAM_SUCCESS) { 2829 cam_sim_free(sc->ha_sim[bus], 2830 /*free_devq*/TRUE); 2831 sc->ha_sim[bus] = NULL; 2832 continue; 2833 } 2834 2835 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL, 2836 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD, 2837 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2838 xpt_bus_deregister( 2839 cam_sim_path(sc->ha_sim[bus])); 2840 cam_sim_free(sc->ha_sim[bus], 2841 /*free_devq*/TRUE); 2842 sc->ha_sim[bus] = NULL; 2843 continue; 2844 } 2845 xpt_setup_ccb(&(ccb->ccb_h), 2846 sc->ha_path[bus], /*priority*/5); 2847 ccb->ccb_h.func_code = XPT_SASYNC_CB; 2848 ccb->csa.event_enable = AC_LOST_DEVICE; 2849 ccb->csa.callback = asr_async; 2850 ccb->csa.callback_arg = sc->ha_sim[bus]; 2851 xpt_action((union ccb *)ccb); 2852 } 2853 asr_free_ccb (ccb); 2854 } 2855 /* 2856 * Generate the device node information 2857 */ 2858 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit); 2859 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1)); 2860 ATTACH_RETURN(0); 2861} /* asr_attach */ 2862 2863#if (!defined(UNREFERENCED_PARAMETER)) 2864# define UNREFERENCED_PARAMETER(x) (void)(x) 
2865#endif 2866 2867STATIC void 2868asr_async( 2869 void * callback_arg, 2870 u_int32_t code, 2871 struct cam_path * path, 2872 void * arg) 2873{ 2874 UNREFERENCED_PARAMETER(callback_arg); 2875 UNREFERENCED_PARAMETER(code); 2876 UNREFERENCED_PARAMETER(path); 2877 UNREFERENCED_PARAMETER(arg); 2878} /* asr_async */ 2879 2880STATIC void 2881asr_poll( 2882 IN struct cam_sim *sim) 2883{ 2884 asr_intr(cam_sim_softc(sim)); 2885} /* asr_poll */ 2886 2887STATIC void 2888asr_action( 2889 IN struct cam_sim * sim, 2890 IN union ccb * ccb) 2891{ 2892 struct Asr_softc * sc; 2893 2894 debug_asr_printf ("asr_action(%lx,%lx{%x})\n", 2895 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code); 2896 2897 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n")); 2898 2899 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim); 2900 2901 switch (ccb->ccb_h.func_code) { 2902 2903 /* Common cases first */ 2904 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2905 { 2906 struct Message { 2907 char M[MAX_INBOUND_SIZE]; 2908 }; 2909 defAlignLong(struct Message,Message); 2910 PI2O_MESSAGE_FRAME Message_Ptr; 2911 2912 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 2913 printf( 2914 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n", 2915 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), 2916 ccb->csio.cdb_io.cdb_bytes[0], 2917 cam_sim_bus(sim), 2918 ccb->ccb_h.target_id, 2919 ccb->ccb_h.target_lun); 2920 } 2921 debug_asr_cmd_printf ("(%d,%d,%d,%d)", 2922 cam_sim_unit(sim), 2923 cam_sim_bus(sim), 2924 ccb->ccb_h.target_id, 2925 ccb->ccb_h.target_lun); 2926 debug_asr_cmd_dump_ccb(ccb); 2927 2928 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb, 2929 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) { 2930 debug_asr_cmd2_printf ("TID=%x:\n", 2931 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID( 2932 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)); 2933 debug_asr_cmd2_dump_message(Message_Ptr); 2934 debug_asr_cmd1_printf (" q"); 2935 2936 if (ASR_queue (sc, 
Message_Ptr) == EMPTY_QUEUE) { 2937#ifdef ASR_MEASURE_PERFORMANCE 2938 ++sc->ha_performance.command_too_busy; 2939#endif 2940 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2941 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2942 debug_asr_cmd_printf (" E\n"); 2943 xpt_done(ccb); 2944 } 2945 debug_asr_cmd_printf (" Q\n"); 2946 break; 2947 } 2948 /* 2949 * We will get here if there is no valid TID for the device 2950 * referenced in the scsi command packet. 2951 */ 2952 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2953 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2954 debug_asr_cmd_printf (" B\n"); 2955 xpt_done(ccb); 2956 break; 2957 } 2958 2959 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2960 /* Rese HBA device ... */ 2961 asr_hbareset (sc); 2962 ccb->ccb_h.status = CAM_REQ_CMP; 2963 xpt_done(ccb); 2964 break; 2965 2966# if (defined(REPORT_LUNS)) 2967 case REPORT_LUNS: 2968# endif 2969 case XPT_ABORT: /* Abort the specified CCB */ 2970 /* XXX Implement */ 2971 ccb->ccb_h.status = CAM_REQ_INVALID; 2972 xpt_done(ccb); 2973 break; 2974 2975 case XPT_SET_TRAN_SETTINGS: 2976 /* XXX Implement */ 2977 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2978 xpt_done(ccb); 2979 break; 2980 2981 case XPT_GET_TRAN_SETTINGS: 2982 /* Get default/user set transfer settings for the target */ 2983 { 2984 struct ccb_trans_settings *cts; 2985 u_int target_mask; 2986 2987 cts = &ccb->cts; 2988 target_mask = 0x01 << ccb->ccb_h.target_id; 2989 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 2990 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB; 2991 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2992 cts->sync_period = 6; /* 40MHz */ 2993 cts->sync_offset = 15; 2994 2995 cts->valid = CCB_TRANS_SYNC_RATE_VALID 2996 | CCB_TRANS_SYNC_OFFSET_VALID 2997 | CCB_TRANS_BUS_WIDTH_VALID 2998 | CCB_TRANS_DISC_VALID 2999 | CCB_TRANS_TQ_VALID; 3000 ccb->ccb_h.status = CAM_REQ_CMP; 3001 } else { 3002 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3003 } 3004 xpt_done(ccb); 3005 break; 3006 } 3007 3008 case XPT_CALC_GEOMETRY: 
3009 { 3010 struct ccb_calc_geometry *ccg; 3011 u_int32_t size_mb; 3012 u_int32_t secs_per_cylinder; 3013 3014 ccg = &ccb->ccg; 3015 size_mb = ccg->volume_size 3016 / ((1024L * 1024L) / ccg->block_size); 3017 3018 if (size_mb > 4096) { 3019 ccg->heads = 255; 3020 ccg->secs_per_track = 63; 3021 } else if (size_mb > 2048) { 3022 ccg->heads = 128; 3023 ccg->secs_per_track = 63; 3024 } else if (size_mb > 1024) { 3025 ccg->heads = 65; 3026 ccg->secs_per_track = 63; 3027 } else { 3028 ccg->heads = 64; 3029 ccg->secs_per_track = 32; 3030 } 3031 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3032 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3033 ccb->ccb_h.status = CAM_REQ_CMP; 3034 xpt_done(ccb); 3035 break; 3036 } 3037 3038 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 3039 ASR_resetBus (sc, cam_sim_bus(sim)); 3040 ccb->ccb_h.status = CAM_REQ_CMP; 3041 xpt_done(ccb); 3042 break; 3043 3044 case XPT_TERM_IO: /* Terminate the I/O process */ 3045 /* XXX Implement */ 3046 ccb->ccb_h.status = CAM_REQ_INVALID; 3047 xpt_done(ccb); 3048 break; 3049 3050 case XPT_PATH_INQ: /* Path routing inquiry */ 3051 { 3052 struct ccb_pathinq *cpi = &ccb->cpi; 3053 3054 cpi->version_num = 1; /* XXX??? 
*/
		/* (tail of asr_action: XPT_PATH_INQ reply fill-in) */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */

#ifdef ASR_MEASURE_PERFORMANCE
#define WRITE_OP 1
#define READ_OP 2
/* Shorthands for the per-size min/max submit-time statistics slots */
#define min_submitR sc->ha_performance.read_by_size_min_time[index]
#define max_submitR sc->ha_performance.read_by_size_max_time[index]
#define min_submitW sc->ha_performance.write_by_size_min_time[index]
#define max_submitW sc->ha_performance.write_by_size_max_time[index]

/*
 * Accumulate per-transfer-size performance statistics for one completed
 * READ_OP/WRITE_OP command: bump the count for this size bucket and, when
 * submitted_time is valid (not the 0xffffffff sentinel), fold it into the
 * total/min/max submit-time accumulators for that bucket.
 */
STATIC INLINE void
asr_IObySize(
	IN Asr_softc_t * sc,
	IN u_int32_t submitted_time,	/* usec; 0xffffffff == unknown */
	IN int op,			/* READ_OP or WRITE_OP */
	IN int index)			/* SIZE_512 .. SIZE_BIGGER bucket */
{
	struct timeval submitted_timeval;

	submitted_timeval.tv_sec = 0;
	submitted_timeval.tv_usec = submitted_time;

	if ( op == READ_OP ) {
		++sc->ha_performance.read_by_size_count[index];

		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &sc->ha_performance.read_by_size_total_time[index],
			  &submitted_timeval);
			/* min slot of 0 means "not yet recorded" */
			if ( (min_submitR == 0)
			 || (submitted_time < min_submitR) ) {
				min_submitR = submitted_time;
			}

			if ( submitted_time > max_submitR ) {
				max_submitR = submitted_time;
			}
		}
	} else {
		++sc->ha_performance.write_by_size_count[index];

		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &sc->ha_performance.write_by_size_total_time[index],
			  &submitted_timeval);
			if ( (submitted_time < min_submitW)
			 || (min_submitW == 0) ) {
				min_submitW = submitted_time;
			}

			if ( submitted_time > max_submitW ) {
				max_submitW = submitted_time;
			}
		}
	}
} /* asr_IObySize */
#endif

/*
 * Handle processing of current CCB as pointed to by the Status.
 *
 * Drains the adapter's outbound (From) FIFO, completing one CCB per reply
 * frame.  Returns non-zero if at least one reply was processed, zero when
 * the FIFO was empty on entry.  Each reply offset is written back to
 * FromFIFO when done so the frame can be reused by the controller.
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	/*
	 * NOTE(review): the loop gate reads the adapter Status register
	 * masked with Mask_InterruptsDisabled -- presumably "interrupt
	 * pending" while masked; confirm against the I2O shared-memory
	 * register definitions.
	 */
	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb * ccb;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/* Read FromFIFO twice: first read may return EMPTY_QUEUE */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus offset into a kernel virtual frame */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Virt->Address + MessageOffset))
			      ->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			  &(Message_Ptr->StdMessageFrame),
			  I2O_UTIL_NOP);
#endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Virt->Address + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status onto a CAM status */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length - what the adapter moved */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

#ifdef ASR_MEASURE_PERFORMANCE
		{
			struct timeval endTime;
			u_int32_t submitted_time;
			u_int32_t size;
			int op_type;
			int startTimeIndex;

			--sc->ha_submitted_ccbs_count;
			startTimeIndex
			  = (int)Reply->StdReplyFrame.TransactionContext;
			if (-1 != startTimeIndex) {
				/* Compute the time spent in device/adapter */
				microtime(&endTime);
				submitted_time = asr_time_delta(sc->ha_timeQ[
				  startTimeIndex], endTime);
				/* put the startTimeIndex back on free list */
				ENQ_TIMEQ_FREE_LIST(startTimeIndex,
				  sc->ha_timeQFreeList,
				  sc->ha_timeQFreeHead,
				  sc->ha_timeQFreeTail);
			} else {
				submitted_time = 0xffffffff;
			}

/* Per-opcode min/max command time slots, keyed by the CDB opcode byte */
#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
			if (submitted_time != 0xffffffff) {
				if ( maxctime < submitted_time ) {
					maxctime = submitted_time;
				}
				if ( (minctime == 0)
				 || (minctime > submitted_time) ) {
					minctime = submitted_time;
				}

				if ( sc->ha_performance.max_submit_time
				  < submitted_time ) {
					sc->ha_performance.max_submit_time
					  = submitted_time;
				}
				if ( sc->ha_performance.min_submit_time == 0
				 || sc->ha_performance.min_submit_time
				  > submitted_time) {
					sc->ha_performance.min_submit_time
					  = submitted_time;
				}

				switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {

				case 0xa8:	/* 12-byte READ */
					/* FALLTHRU */
				case 0x08:	/* 6-byte READ  */
					/* FALLTHRU */
				case 0x28:	/* 10-byte READ */
					op_type = READ_OP;
					break;

				case 0x0a:	/* 6-byte WRITE */
					/* FALLTHRU */
				case 0xaa:	/* 12-byte WRITE */
					/* FALLTHRU */
				case 0x2a:	/* 10-byte WRITE */
					op_type = WRITE_OP;
					break;

				default:
					op_type = 0;
					break;
				}

				if ( op_type != 0 ) {
					struct scsi_rw_big * cmd;

					cmd = (struct scsi_rw_big *)
					  &(ccb->csio.cdb_io);

					/* blocks -> bytes (<< 9 == *512) */
					size = (((u_int32_t) cmd->length2 << 8)
					  | ((u_int32_t) cmd->length1)) << 9;

					switch ( size ) {

					case 512:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_512);
						break;

					case 1024:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_1K);
						break;

					case 2048:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_2K);
						break;

					case 4096:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_4K);
						break;

					case 8192:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_8K);
						break;

					case 16384:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_16K);
						break;

					case 32768:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_32K);
						break;

					case 65536:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_64K);
						break;

					default:
						if ( size > (1 << 16) ) {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_BIGGER);
						} else {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_OTHER);
						}
						break;
					}
				}
			}
		}
#endif
		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of the three limits */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal command (no CAM path): wake the waiter */
			wakeup ((caddr_t)ccb);
		}
	}
#ifdef ASR_MEASURE_PERFORMANCE
	{
		u_int32_t result;

		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			if ( (sc->ha_performance.min_intr_time == 0)
			 || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */

#undef QueueSize /* Grrrr */
#undef SG_Size /* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * included here just to pick up these three typedefs.
 */
typedef U32 DPT_TAG_T;
typedef U32 DPT_MSG_T;
typedef U32 DPT_RTN_T;

#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */
#include "dev/asr/osd_unix.h"

#define asr_unit(dev) minor(dev)

/*
 * Walk the global softc list looking for the adapter whose first SIM's
 * CAM unit number matches the control-device minor; NULL if not found.
 */
STATIC INLINE Asr_softc_t *
ASR_get_sc (
	IN dev_t dev)
{
	int unit = asr_unit(dev);
	OUT Asr_softc_t * sc = Asr_softc;

	while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
		sc = sc->ha_next;
	}
	return (sc);
} /* ASR_get_sc */

/* Non-zero while the control device is open; enforces exclusive access */
STATIC u_int8_t ASR_ctlr_held;

/*
 * Control-device open: superuser only, single opener across all adapters.
 * Returns ENODEV for an unknown unit, EBUSY if already held.
 */
STATIC int
asr_open(
	IN dev_t dev,
	int32_t flags,
	int32_t ifmt,
	IN struct proc * proc)
{
	int s;
	OUT int error;
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);

	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	s = splcam ();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser(proc)) == 0) {
		++ASR_ctlr_held;
	}
	splx(s);
	return (error);
} /* asr_open */

STATIC int
asr_close(
	dev_t dev,
	int flags,
	int ifmt,
	struct proc * proc)
{
	UNREFERENCED_PARAMETER(dev);
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);
	UNREFERENCED_PARAMETER(proc);

	/* Release the exclusive-open hold taken in asr_open */
	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */


/*-------------------------------------------------------------------------*/
/* Function ASR_queue_i                                                    */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/*     Asr_softc_t *      : HBA miniport driver's adapter data storage.    */
/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command         */
/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure     */
/*                                                                         */
/* This Function Will Take The User Request Packet And Convert It To An    */
/* I2O MSG And Send It Off To The Adapter.                                 */
/*                                                                         */
/* Return : 0 For OK, Error Code Otherwise                                 */
/*-------------------------------------------------------------------------*/
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t	    * sc,
	INOUT PI2O_MESSAGE_FRAME Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;
		I2O_FLAGS_COUNT FlagsCount;
		char KernelSpace[sizeof(long)];	/* flexible tail, see malloc */
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	 == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free (Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc->ha_Virt);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus (sc->ha_Virt, &status)
		 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Copy in the whole message frame this time */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free (Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate a kernel reply frame of at least error-reply size */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes
	    : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free (Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free (Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/* Kernel shadow buffer for this user SG element */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK))
			 == (struct ioctlSgList_S *)NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			 != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
						flags &=
						  ~(I2O_SGL_FLAGS_END_OF_BUFFER
						  | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					      M_TEMP, M_WAITOK))
					 == (PI2O_MESSAGE_FRAME)NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy ((caddr_t)Message_Ptr,
					  (caddr_t)NewMessage_Ptr, span);
					bcopy ((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free (Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList,link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			free (Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList))
		  != (struct ioctlSgList_S *)NULL) {
			SLIST_REMOVE_HEAD(&sgList,link);
			free (elm, M_TEMP);
		}
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free (Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while (ccb->ccb_h.status == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			ASR_reset (sc);
			splx(s);
			/* Command Cleanup */
			ASR_ccbRemove(sc, ccb);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList,link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep((caddr_t)ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame), (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy ((caddr_t)&(ccb->csio.sense_data),
		  (caddr_t)Reply_Ptr->SenseData,
		  size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout ((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList,link);
		free (elm, M_TEMP);
	}
	if (error == 0) {
		/* Copy reply frame to user space */
		error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	free (Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/* Function asr_ioctl                                                   */
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :                         */
/*     dev  : Device number.                                            */
/*     cmd  : Ioctl Command                                             */
/*     data : User Argument Passed In.
*/ 4054/* flag : Mode Parameter */ 4055/* proc : Process Parameter */ 4056/* */ 4057/* This function is the user interface into this adapter driver */ 4058/* */ 4059/* Return : zero if OK, error code if not */ 4060/*----------------------------------------------------------------------*/ 4061 4062STATIC int 4063asr_ioctl( 4064 IN dev_t dev, 4065 IN u_long cmd, 4066 INOUT caddr_t data, 4067 int flag, 4068 struct proc * proc) 4069{ 4070 int i, j; 4071 OUT int error = 0; 4072 Asr_softc_t * sc = ASR_get_sc (dev); 4073 UNREFERENCED_PARAMETER(flag); 4074 UNREFERENCED_PARAMETER(proc); 4075 4076 if (sc != (Asr_softc_t *)NULL) 4077 switch(cmd) { 4078 4079 case DPT_SIGNATURE: 4080# if (dsDescription_size != 50) 4081 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16): 4082# endif 4083 if (cmd & 0xFFFF0000) { 4084 (void)bcopy ((caddr_t)(&ASR_sig), data, 4085 sizeof(dpt_sig_S)); 4086 return (0); 4087 } 4088 /* Traditional version of the ioctl interface */ 4089 case DPT_SIGNATURE & 0x0000FFFF: 4090 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data), 4091 sizeof(dpt_sig_S))); 4092 4093 /* Traditional version of the ioctl interface */ 4094 case DPT_CTRLINFO & 0x0000FFFF: 4095 case DPT_CTRLINFO: { 4096 struct { 4097 u_int16_t length; 4098 u_int16_t drvrHBAnum; 4099 u_int32_t baseAddr; 4100 u_int16_t blinkState; 4101 u_int8_t pciBusNum; 4102 u_int8_t pciDeviceNum; 4103 u_int16_t hbaFlags; 4104 u_int16_t Interrupt; 4105 u_int32_t reserved1; 4106 u_int32_t reserved2; 4107 u_int32_t reserved3; 4108 } CtlrInfo; 4109 4110 bzero (&CtlrInfo, sizeof(CtlrInfo)); 4111 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t); 4112 CtlrInfo.drvrHBAnum = asr_unit(dev); 4113 CtlrInfo.baseAddr = (u_long)sc->ha_Base; 4114 i = ASR_getBlinkLedCode (sc); 4115 if (i == -1) { 4116 i = 0; 4117 } 4118 CtlrInfo.blinkState = i; 4119 CtlrInfo.pciBusNum = sc->ha_pciBusNum; 4120 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum; 4121#define FLG_OSD_PCI_VALID 0x0001 4122#define FLG_OSD_DMA 0x0002 4123#define 
FLG_OSD_I2O 0x0004 4124 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O; 4125 CtlrInfo.Interrupt = sc->ha_irq; 4126 if (cmd & 0xFFFF0000) { 4127 bcopy (&CtlrInfo, data, sizeof(CtlrInfo)); 4128 } else { 4129 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo)); 4130 } 4131 } return (error); 4132 4133 /* Traditional version of the ioctl interface */ 4134 case DPT_SYSINFO & 0x0000FFFF: 4135 case DPT_SYSINFO: { 4136 sysInfo_S Info; 4137 caddr_t c_addr; 4138 /* Kernel Specific ptok `hack' */ 4139# define ptok(a) ((char *)(a) + KERNBASE) 4140 4141 bzero (&Info, sizeof(Info)); 4142 4143 outb (0x70, 0x12); 4144 i = inb(0x71); 4145 j = i >> 4; 4146 if (i == 0x0f) { 4147 outb (0x70, 0x19); 4148 j = inb (0x71); 4149 } 4150 Info.drive0CMOS = j; 4151 4152 j = i & 0x0f; 4153 if (i == 0x0f) { 4154 outb (0x70, 0x1a); 4155 j = inb (0x71); 4156 } 4157 Info.drive1CMOS = j; 4158 4159 Info.numDrives = *((char *)ptok(0x475)); 4160 4161 Info.processorFamily = ASR_sig.dsProcessorFamily; 4162 switch (cpu) { 4163 case CPU_386SX: case CPU_386: 4164 Info.processorType = PROC_386; break; 4165 case CPU_486SX: case CPU_486: 4166 Info.processorType = PROC_486; break; 4167 case CPU_586: 4168 Info.processorType = PROC_PENTIUM; break; 4169 case CPU_686: 4170 Info.processorType = PROC_SEXIUM; break; 4171 } 4172 Info.osType = OS_BSDI_UNIX; 4173 Info.osMajorVersion = osrelease[0] - '0'; 4174 Info.osMinorVersion = osrelease[2] - '0'; 4175 /* Info.osRevision = 0; */ 4176 /* Info.osSubRevision = 0; */ 4177 Info.busType = SI_PCI_BUS; 4178 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid 4179 | SI_OSversionValid |SI_BusTypeValid; 4180 4181 /* Go Out And Look For SmartROM */ 4182 for(i = 0; i < 3; ++i) { 4183 int k; 4184 4185 if (i == 0) { 4186 j = 0xC8000; 4187 } else if (i == 1) { 4188 j = 0xD8000; 4189 } else { 4190 j = 0xDC000; 4191 } 4192 c_addr = ptok(j); 4193 if (*((unsigned short *)c_addr) != 0xAA55) { 4194 continue; 4195 } 4196 if (*((u_long *)(c_addr + 6)) != 0x202053) { 
4197 continue; 4198 } 4199 if (*((u_long *)(c_addr + 10)) != 0x545044) { 4200 continue; 4201 } 4202 c_addr += 0x24; 4203 for (k = 0; k < 64; ++k) { 4204 if ((*((unsigned char *)(c_addr++)) == ' ') 4205 && (*((unsigned char *)(c_addr)) == 'v')) { 4206 break; 4207 } 4208 } 4209 if (k < 64) { 4210 Info.smartROMMajorVersion 4211 = *((unsigned char *)(c_addr += 3)) - '0'; 4212 Info.smartROMMinorVersion 4213 = *((unsigned char *)(c_addr += 2)); 4214 Info.smartROMRevision 4215 = *((unsigned char *)(++c_addr)); 4216 Info.flags |= SI_SmartROMverValid; 4217 break; 4218 } 4219 } 4220 if (i >= 3) { 4221 Info.flags |= SI_NO_SmartROM; 4222 } 4223 /* Get The Conventional Memory Size From CMOS */ 4224 outb (0x70, 0x16); 4225 j = inb (0x71); 4226 j <<= 8; 4227 outb (0x70, 0x15); 4228 j |= inb(0x71); 4229 Info.conventionalMemSize = j; 4230 4231 /* Get The Extended Memory Found At Power On From CMOS */ 4232 outb (0x70, 0x31); 4233 j = inb (0x71); 4234 j <<= 8; 4235 outb (0x70, 0x30); 4236 j |= inb(0x71); 4237 Info.extendedMemSize = j; 4238 Info.flags |= SI_MemorySizeValid; 4239 4240# if (defined(THIS_IS_BROKEN)) 4241 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */ 4242 if (Info.numDrives > 0) { 4243 /* 4244 * Get The Pointer From Int 41 For The First 4245 * Drive Parameters 4246 */ 4247 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4) 4248 + (unsigned)(*((unsigned short *)ptok(0x104+0))); 4249 /* 4250 * It appears that SmartROM's Int41/Int46 pointers 4251 * use memory that gets stepped on by the kernel 4252 * loading. We no longer have access to this 4253 * geometry information but try anyways (!?) 
4254 */ 4255 Info.drives[0].cylinders = *((unsigned char *)ptok(j)); 4256 ++j; 4257 Info.drives[0].cylinders += ((int)*((unsigned char *) 4258 ptok(j))) << 8; 4259 ++j; 4260 Info.drives[0].heads = *((unsigned char *)ptok(j)); 4261 j += 12; 4262 Info.drives[0].sectors = *((unsigned char *)ptok(j)); 4263 Info.flags |= SI_DriveParamsValid; 4264 if ((Info.drives[0].cylinders == 0) 4265 || (Info.drives[0].heads == 0) 4266 || (Info.drives[0].sectors == 0)) { 4267 Info.flags &= ~SI_DriveParamsValid; 4268 } 4269 if (Info.numDrives > 1) { 4270 /* 4271 * Get The Pointer From Int 46 For The 4272 * Second Drive Parameters 4273 */ 4274 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4) 4275 + (unsigned)(*((unsigned short *)ptok(0x118+0))); 4276 Info.drives[1].cylinders = *((unsigned char *) 4277 ptok(j)); 4278 ++j; 4279 Info.drives[1].cylinders += ((int) 4280 *((unsigned char *)ptok(j))) << 8; 4281 ++j; 4282 Info.drives[1].heads = *((unsigned char *) 4283 ptok(j)); 4284 j += 12; 4285 Info.drives[1].sectors = *((unsigned char *) 4286 ptok(j)); 4287 if ((Info.drives[1].cylinders == 0) 4288 || (Info.drives[1].heads == 0) 4289 || (Info.drives[1].sectors == 0)) { 4290 Info.flags &= ~SI_DriveParamsValid; 4291 } 4292 } 4293 } 4294# endif 4295 /* Copy Out The Info Structure To The User */ 4296 if (cmd & 0xFFFF0000) { 4297 bcopy (&Info, data, sizeof(Info)); 4298 } else { 4299 error = copyout (&Info, *(caddr_t *)data, sizeof(Info)); 4300 } 4301 return (error); } 4302 4303 /* Get The BlinkLED State */ 4304 case DPT_BLINKLED: 4305 i = ASR_getBlinkLedCode (sc); 4306 if (i == -1) { 4307 i = 0; 4308 } 4309 if (cmd & 0xFFFF0000) { 4310 bcopy ((caddr_t)(&i), data, sizeof(i)); 4311 } else { 4312 error = copyout (&i, *(caddr_t *)data, sizeof(i)); 4313 } 4314 break; 4315 4316 /* Get performance metrics */ 4317#ifdef ASR_MEASURE_PERFORMANCE 4318 case DPT_PERF_INFO: 4319 bcopy((caddr_t) &sc->ha_performance, data, 4320 sizeof(sc->ha_performance)); 4321 return (0); 4322#endif 4323 4324 /* Send 
an I2O command */ 4325 case I2OUSRCMD: 4326 return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data))); 4327 4328 /* Reset and re-initialize the adapter */ 4329 case I2ORESETCMD: 4330 ASR_reset (sc); 4331 return (0); 4332 4333 /* Rescan the LCT table and resynchronize the information */ 4334 case I2ORESCANCMD: 4335 return (ASR_rescan (sc)); 4336 } 4337 return (EINVAL); 4338} /* asr_ioctl */ 4339 4340#ifdef ASR_MEASURE_PERFORMANCE 4341/* 4342 * This function subtracts one timeval structure from another, 4343 * Returning the result in usec. 4344 * It assumes that less than 4 billion usecs passed form start to end. 4345 * If times are sensless, 0xffffffff is returned. 4346 */ 4347 4348STATIC u_int32_t 4349asr_time_delta( 4350 IN struct timeval start, 4351 IN struct timeval end) 4352{ 4353 OUT u_int32_t result; 4354 4355 if (start.tv_sec > end.tv_sec) { 4356 result = 0xffffffff; 4357 } 4358 else { 4359 if (start.tv_sec == end.tv_sec) { 4360 if (start.tv_usec > end.tv_usec) { 4361 result = 0xffffffff; 4362 } else { 4363 return (end.tv_usec - start.tv_usec); 4364 } 4365 } else { 4366 return (end.tv_sec - start.tv_sec) * 1000000 + 4367 end.tv_usec + (1000000 - start.tv_usec); 4368 } 4369 } 4370 return(result); 4371} /* asr_time_delta */ 4372#endif 4373