1/************************************************************************ 2 * Linux driver for * 3 * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers * 4 * Intel Corporation: Storage RAID Controllers * 5 * * 6 * gdth.c * 7 * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner * 8 * Copyright (C) 2002-04 Intel Corporation * 9 * Copyright (C) 2003-06 Adaptec Inc. * 10 * <achim_leubner@adaptec.com> * 11 * * 12 * Additions/Fixes: * 13 * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> * 14 * Johannes Dinner <johannes_dinner@adaptec.com> * 15 * * 16 * This program is free software; you can redistribute it and/or modify * 17 * it under the terms of the GNU General Public License as published * 18 * by the Free Software Foundation; either version 2 of the License, * 19 * or (at your option) any later version. * 20 * * 21 * This program is distributed in the hope that it will be useful, * 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of * 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 24 * GNU General Public License for more details. * 25 * * 26 * You should have received a copy of the GNU General Public License * 27 * along with this kernel; if not, write to the Free Software * 28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* 29 * * 30 * Linux kernel 2.4.x, 2.6.x supported * 31 * * 32 * $Log: gdth.c,v $ 33 * Revision 1.1.1.1 2007/08/03 18:52:56 rnuti 34 * Importing Linux MIPS Kernel 2.6.22 35 * 36 * Revision 1.74 2006/04/10 13:44:47 achim 37 * Community changes for 2.6.x 38 * Kernel 2.2.x no longer supported 39 * scsi_request interface removed, thanks to Christoph Hellwig 40 * 41 * Revision 1.73 2004/03/31 13:33:03 achim 42 * Special command 0xfd implemented to detect 64-bit DMA support 43 * 44 * Revision 1.72 2004/03/17 08:56:04 achim 45 * 64-bit DMA only enabled if FW >= x.43 46 * 47 * Revision 1.71 2004/03/05 15:51:29 achim 48 * Screen service: separate message buffer, bugfixes 49 * 50 * Revision 1.70 2004/02/27 12:19:07 achim 51 * Bugfix: Reset bit in config (0xfe) call removed 52 * 53 * Revision 1.69 2004/02/20 09:50:24 achim 54 * Compatibility changes for kernels < 2.4.20 55 * Bugfix screen service command size 56 * pci_set_dma_mask() error handling added 57 * 58 * Revision 1.68 2004/02/19 15:46:54 achim 59 * 64-bit DMA bugfixes 60 * Drive size bugfix for drives > 1TB 61 * 62 * Revision 1.67 2004/01/14 13:11:57 achim 63 * Tool access over /proc no longer supported 64 * Bugfixes IOCTLs 65 * 66 * Revision 1.66 2003/12/19 15:04:06 achim 67 * Bugfixes support for drives > 2TB 68 * 69 * Revision 1.65 2003/12/15 11:21:56 achim 70 * 64-bit DMA support added 71 * Support for drives > 2 TB implemented 72 * Kernels 2.2.x, 2.4.x, 2.6.x supported 73 * 74 * Revision 1.64 2003/09/17 08:30:26 achim 75 * EISA/ISA controller scan disabled 76 * Command line switch probe_eisa_isa added 77 * 78 * Revision 1.63 2003/07/12 14:01:00 Daniele Bellucci <bellucda@tiscali.it> 79 * Minor cleanups in gdth_ioctl. 
80 * 81 * Revision 1.62 2003/02/27 15:01:59 achim 82 * Dynamic DMA mapping implemented 83 * New (character device) IOCTL interface added 84 * Other controller related changes made 85 * 86 * Revision 1.61 2002/11/08 13:09:52 boji 87 * Added support for XSCALE based RAID Controllers 88 * Fixed SCREENSERVICE initialization in SMP cases 89 * Added checks for gdth_polling before GDTH_HA_LOCK 90 * 91 * Revision 1.60 2002/02/05 09:35:22 achim 92 * MODULE_LICENSE only if kernel >= 2.4.11 93 * 94 * Revision 1.59 2002/01/30 09:46:33 achim 95 * Small changes 96 * 97 * Revision 1.58 2002/01/29 15:30:02 achim 98 * Set default value of shared_access to Y 99 * New status S_CACHE_RESERV for clustering added 100 * 101 * Revision 1.57 2001/08/21 11:16:35 achim 102 * Bugfix free_irq() 103 * 104 * Revision 1.56 2001/08/09 11:19:39 achim 105 * Scsi_Host_Template changes 106 * 107 * Revision 1.55 2001/08/09 10:11:28 achim 108 * Command HOST_UNFREEZE_IO before cache service init. 109 * 110 * Revision 1.54 2001/07/20 13:48:12 achim 111 * Expand: gdth_analyse_hdrive() removed 112 * 113 * Revision 1.53 2001/07/17 09:52:49 achim 114 * Small OEM related change 115 * 116 * Revision 1.52 2001/06/19 15:06:20 achim 117 * New host command GDT_UNFREEZE_IO added 118 * 119 * Revision 1.51 2001/05/22 06:42:37 achim 120 * PCI: Subdevice ID added 121 * 122 * Revision 1.50 2001/05/17 13:42:16 achim 123 * Support for Intel Storage RAID Controllers added 124 * 125 * Revision 1.50 2001/05/17 12:12:34 achim 126 * Support for Intel Storage RAID Controllers added 127 * 128 * Revision 1.49 2001/03/15 15:07:17 achim 129 * New __setup interface for boot command line options added 130 * 131 * Revision 1.48 2001/02/06 12:36:28 achim 132 * Bugfix Cluster protocol 133 * 134 * Revision 1.47 2001/01/10 14:42:06 achim 135 * New switch shared_access added 136 * 137 * Revision 1.46 2001/01/09 08:11:35 achim 138 * gdth_command() removed 139 * meaning of Scsi_Pointer members changed 140 * 141 * Revision 1.45 2000/11/16 
12:02:24 achim 142 * Changes for kernel 2.4 143 * 144 * Revision 1.44 2000/10/11 08:44:10 achim 145 * Clustering changes: New flag media_changed added 146 * 147 * Revision 1.43 2000/09/20 12:59:01 achim 148 * DPMEM remap functions for all PCI controller types implemented 149 * Small changes for ia64 platform 150 * 151 * Revision 1.42 2000/07/20 09:04:50 achim 152 * Small changes for kernel 2.4 153 * 154 * Revision 1.41 2000/07/04 14:11:11 achim 155 * gdth_analyse_hdrive() added to rescan drives after online expansion 156 * 157 * Revision 1.40 2000/06/27 11:24:16 achim 158 * Changes Clustering, Screenservice 159 * 160 * Revision 1.39 2000/06/15 13:09:04 achim 161 * Changes for gdth_do_cmd() 162 * 163 * Revision 1.38 2000/06/15 12:08:43 achim 164 * Bugfix gdth_sync_event(), service SCREENSERVICE 165 * Data direction for command 0xc2 changed to DOU 166 * 167 * Revision 1.37 2000/05/25 13:50:10 achim 168 * New driver parameter virt_ctr added 169 * 170 * Revision 1.36 2000/05/04 08:50:46 achim 171 * Event buffer now in gdth_ha_str 172 * 173 * Revision 1.35 2000/03/03 10:44:08 achim 174 * New event_string only valid for the RP controller family 175 * 176 * Revision 1.34 2000/03/02 14:55:29 achim 177 * New mechanism for async. event handling implemented 178 * 179 * Revision 1.33 2000/02/21 15:37:37 achim 180 * Bugfix Alpha platform + DPMEM above 4GB 181 * 182 * Revision 1.32 2000/02/14 16:17:37 achim 183 * Bugfix sense_buffer[] + raw devices 184 * 185 * Revision 1.31 2000/02/10 10:29:00 achim 186 * Delete sense_buffer[0], if command OK 187 * 188 * Revision 1.30 1999/11/02 13:42:39 achim 189 * ARRAY_DRV_LIST2 implemented 190 * Now 255 log. 
and 100 host drives supported 191 * 192 * Revision 1.29 1999/10/05 13:28:47 achim 193 * GDT_CLUST_RESET added 194 * 195 * Revision 1.28 1999/08/12 13:44:54 achim 196 * MOUNTALL removed 197 * Cluster drives -> removeable drives 198 * 199 * Revision 1.27 1999/06/22 07:22:38 achim 200 * Small changes 201 * 202 * Revision 1.26 1999/06/10 16:09:12 achim 203 * Cluster Host Drive support: Bugfixes 204 * 205 * Revision 1.25 1999/06/01 16:03:56 achim 206 * gdth_init_pci(): Manipulate config. space to start RP controller 207 * 208 * Revision 1.24 1999/05/26 11:53:06 achim 209 * Cluster Host Drive support added 210 * 211 * Revision 1.23 1999/03/26 09:12:31 achim 212 * Default value for hdr_channel set to 0 213 * 214 * Revision 1.22 1999/03/22 16:27:16 achim 215 * Bugfix: gdth_store_event() must not be locked with GDTH_LOCK_HA() 216 * 217 * Revision 1.21 1999/03/16 13:40:34 achim 218 * Problems with reserved drives solved 219 * gdth_eh_bus_reset() implemented 220 * 221 * Revision 1.20 1999/03/10 09:08:13 achim 222 * Bugfix: Corrections in gdth_direction_tab[] made 223 * Bugfix: Increase command timeout (gdth_update_timeout()) NOT in gdth_putq() 224 * 225 * Revision 1.19 1999/03/05 14:38:16 achim 226 * Bugfix: Heads/Sectors mapping for reserved devices possibly wrong 227 * -> gdth_eval_mapping() implemented, changes in gdth_bios_param() 228 * INIT_RETRIES set to 100s to avoid DEINIT-Timeout for controllers 229 * with BIOS disabled and memory test set to Intensive 230 * Enhanced /proc support 231 * 232 * Revision 1.18 1999/02/24 09:54:33 achim 233 * Command line parameter hdr_channel implemented 234 * Bugfix for EISA controllers + Linux 2.2.x 235 * 236 * Revision 1.17 1998/12/17 15:58:11 achim 237 * Command line parameters implemented 238 * Changes for Alpha platforms 239 * PCI controller scan changed 240 * SMP support improved (spin_lock_irqsave(),...) 241 * New async. 
events, new scan/reserve commands included 242 * 243 * Revision 1.16 1998/09/28 16:08:46 achim 244 * GDT_PCIMPR: DPMEM remapping, if required 245 * mdelay() added 246 * 247 * Revision 1.15 1998/06/03 14:54:06 achim 248 * gdth_delay(), gdth_flush() implemented 249 * Bugfix: gdth_release() changed 250 * 251 * Revision 1.14 1998/05/22 10:01:17 achim 252 * mj: pcibios_strerror() removed 253 * Improved SMP support (if version >= 2.1.95) 254 * gdth_halt(): halt_called flag added (if version < 2.1) 255 * 256 * Revision 1.13 1998/04/16 09:14:57 achim 257 * Reserve drives (for raw service) implemented 258 * New error handling code enabled 259 * Get controller name from board_info() IOCTL 260 * Final round of PCI device driver patches by Martin Mares 261 * 262 * Revision 1.12 1998/03/03 09:32:37 achim 263 * Fibre channel controller support added 264 * 265 * Revision 1.11 1998/01/27 16:19:14 achim 266 * SA_SHIRQ added 267 * add_timer()/del_timer() instead of GDTH_TIMER 268 * scsi_add_timer()/scsi_del_timer() instead of SCSI_TIMER 269 * New error handling included 270 * 271 * Revision 1.10 1997/10/31 12:29:57 achim 272 * Read heads/sectors from host drive 273 * 274 * Revision 1.9 1997/09/04 10:07:25 achim 275 * IO-mapping with virt_to_bus(), gdth_readb(), gdth_writeb(), ... 276 * register_reboot_notifier() to get a notify on shutown used 277 * 278 * Revision 1.8 1997/04/02 12:14:30 achim 279 * Version 1.00 (see gdth.h), tested with kernel 2.0.29 280 * 281 * Revision 1.7 1997/03/12 13:33:37 achim 282 * gdth_reset() changed, new async. 
events 283 * 284 * Revision 1.6 1997/03/04 14:01:11 achim 285 * Shutdown routine gdth_halt() implemented 286 * 287 * Revision 1.5 1997/02/21 09:08:36 achim 288 * New controller included (RP, RP1, RP2 series) 289 * IOCTL interface implemented 290 * 291 * Revision 1.4 1996/07/05 12:48:55 achim 292 * Function gdth_bios_param() implemented 293 * New constant GDTH_MAXC_P_L inserted 294 * GDT_WRITE_THR, GDT_EXT_INFO implemented 295 * Function gdth_reset() changed 296 * 297 * Revision 1.3 1996/05/10 09:04:41 achim 298 * Small changes for Linux 1.2.13 299 * 300 * Revision 1.2 1996/05/09 12:45:27 achim 301 * Loadable module support implemented 302 * /proc support corrections made 303 * 304 * Revision 1.1 1996/04/11 07:35:57 achim 305 * Initial revision 306 * 307 ************************************************************************/ 308 309/* All GDT Disk Array Controllers are fully supported by this driver. 310 * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the 311 * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete 312 * list of all controller types. 313 * 314 * If you have one or more GDT3000/3020 EISA controllers with 315 * controller BIOS disabled, you have to set the IRQ values with the 316 * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are 317 * the IRQ values for the EISA controllers. 318 * 319 * After the optional list of IRQ values, other possible 320 * command line options are: 321 * disable:Y disable driver 322 * disable:N enable driver 323 * reserve_mode:0 reserve no drives for the raw service 324 * reserve_mode:1 reserve all not init., removable drives 325 * reserve_mode:2 reserve all not init. drives 326 * reserve_list:h,b,t,l,h,b,t,l,... 
reserve particular drive(s) with
 *                          h- controller no., b- channel no.,
 *                          t- target ID, l- LUN
 * reverse_scan:Y       reverse scan order for PCI controllers
 * reverse_scan:N       scan PCI controllers like BIOS
 * max_ids:x            x - target ID count per channel (1..MAXID)
 * rescan:Y             rescan all channels/IDs
 * rescan:N             use all devices found until now
 * virt_ctr:Y           map every channel to a virtual controller
 * virt_ctr:N           use multi channel support
 * hdr_channel:x        x - number of virtual bus for host drives
 * shared_access:Y      disable driver reserve/release protocol to
 *                      access a shared resource from several nodes,
 *                      appropriate controller firmware required
 * shared_access:N      enable driver reserve/release protocol
 * probe_eisa_isa:Y     scan for EISA/ISA controllers
 * probe_eisa_isa:N     do not scan for EISA/ISA controllers
 * force_dma32:Y        use only 32 bit DMA mode
 * force_dma32:N        use 64 bit DMA mode, if supported
 *
 * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
 *                          max_ids:127,rescan:N,virt_ctr:N,hdr_channel:0,
 *                          shared_access:Y,probe_eisa_isa:N,force_dma32:N".
 * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
 *
 * When loading the gdth driver as a module, the same options are available.
 * You can set the IRQs with "IRQ=...". However, the syntax to specify the
 * options changes slightly. You must replace all ',' between options
 * with ' ' and all ':' with '=' and you must use
 * '1' in place of 'Y' and '0' in place of 'N'.
 *
 * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
 *           max_ids=127 rescan=0 virt_ctr=0 hdr_channel=0 shared_access=1
 *           probe_eisa_isa=0 force_dma32=0"
 * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
 */

/* The meaning of the Scsi_Pointer members in this driver is as follows:
 * ptr:                     Chaining
 * this_residual:           Command priority
 * buffer:                  phys.
DMA sense buffer 367 * dma_handle: phys. DMA buffer (kernel >= 2.4.0) 368 * buffers_residual: Timeout value 369 * Status: Command status (gdth_do_cmd()), DMA mem. mappings 370 * Message: Additional info (gdth_do_cmd()), DMA direction 371 * have_data_in: Flag for gdth_wait_completion() 372 * sent_command: Opcode special command 373 * phase: Service/parameter/return code special command 374 */ 375 376 377/* interrupt coalescing */ 378/* #define INT_COAL */ 379 380/* statistics */ 381#define GDTH_STATISTICS 382 383#include <linux/module.h> 384 385#include <linux/version.h> 386#include <linux/kernel.h> 387#include <linux/types.h> 388#include <linux/pci.h> 389#include <linux/string.h> 390#include <linux/ctype.h> 391#include <linux/ioport.h> 392#include <linux/delay.h> 393#include <linux/interrupt.h> 394#include <linux/in.h> 395#include <linux/proc_fs.h> 396#include <linux/time.h> 397#include <linux/timer.h> 398#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6) 399#include <linux/dma-mapping.h> 400#else 401#define DMA_32BIT_MASK 0x00000000ffffffffULL 402#define DMA_64BIT_MASK 0xffffffffffffffffULL 403#endif 404 405#ifdef GDTH_RTC 406#include <linux/mc146818rtc.h> 407#endif 408#include <linux/reboot.h> 409 410#include <asm/dma.h> 411#include <asm/system.h> 412#include <asm/io.h> 413#include <asm/uaccess.h> 414#include <linux/spinlock.h> 415#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 416#include <linux/blkdev.h> 417#else 418#include <linux/blk.h> 419#include "sd.h" 420#endif 421 422#include "scsi.h" 423#include <scsi/scsi_host.h> 424#include "gdth_kcompat.h" 425#include "gdth.h" 426 427static void gdth_delay(int milliseconds); 428static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs); 429static irqreturn_t gdth_interrupt(int irq, void *dev_id); 430static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp); 431static int gdth_async_event(int hanum); 432static void gdth_log_event(gdth_evt_data *dvr, char *buffer); 433 434static 
void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority); 435static void gdth_next(int hanum); 436static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b); 437static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp); 438static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source, 439 ushort idx, gdth_evt_data *evt); 440static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); 441static void gdth_readapp_event(gdth_ha_str *ha, unchar application, 442 gdth_evt_str *estr); 443static void gdth_clear_events(void); 444 445static void gdth_copy_internal_data(int hanum,Scsi_Cmnd *scp, 446 char *buffer,ushort count); 447static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp); 448static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive); 449 450static int gdth_search_eisa(ushort eisa_adr); 451static int gdth_search_isa(ulong32 bios_adr); 452static int gdth_search_pci(gdth_pci_str *pcistr); 453static void gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, 454 ushort vendor, ushort dev); 455static void gdth_sort_pci(gdth_pci_str *pcistr, int cnt); 456static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha); 457static int gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha); 458static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha); 459 460static void gdth_enable_int(int hanum); 461static int gdth_get_status(unchar *pIStatus,int irq); 462static int gdth_test_busy(int hanum); 463static int gdth_get_cmd_index(int hanum); 464static void gdth_release_event(int hanum); 465static int gdth_wait(int hanum,int index,ulong32 time); 466static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong32 p1, 467 ulong64 p2,ulong64 p3); 468static int gdth_search_drives(int hanum); 469static int gdth_analyse_hdrive(int hanum, ushort hdrive); 470 471static const char *gdth_ctr_name(int hanum); 472 473static int gdth_open(struct inode *inode, struct file *filep); 474static int gdth_close(struct inode *inode, struct file *filep); 
static int gdth_ioctl(struct inode *inode, struct file *filep,
                      unsigned int cmd, unsigned long arg);

static void gdth_flush(int hanum);
static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
static void gdth_scsi_done(struct scsi_cmnd *scp);

#ifdef DEBUG_GDTH
static unchar   DebugState = DEBUG_GDTH;

#ifdef __SERIAL__
/* Debug output over a raw COM port (polled, no interrupts) instead of
 * printk().  Only compiled in when both DEBUG_GDTH and __SERIAL__ are set. */
#define MAX_SERBUF 160
static void ser_init(void);
static void ser_puts(char *str);
static void ser_putc(char c);
static int  ser_printk(const char *fmt, ...);
static char strbuf[MAX_SERBUF+1];
#ifdef __COM2__
#define COM_BASE 0x2f8
#else
#define COM_BASE 0x3f8
#endif
/* Program the UART at COM_BASE: set DLAB, divisor 6 -> 19200 baud,
 * 8 data bits (LCR=3), interrupts off. */
static void ser_init()
{
    unsigned port=COM_BASE;

    outb(0x80,port+3);
    outb(0,port+1);
    /* 19200 Baud, if 9600: outb(12,port) */
    outb(6, port);
    outb(3,port+3);
    outb(0,port+1);
    /*
    ser_putc('I');
    ser_putc(' ');
    */
}

/* Re-initialize the port and write a NUL-terminated string, char by char. */
static void ser_puts(char *str)
{
    char *ptr;

    ser_init();
    for (ptr=str;*ptr;++ptr)
        ser_putc(*ptr);
}

/* Busy-wait until the transmitter is ready (LSR bit 0x20), then send the
 * character; a LF is followed by a CR for terminal output. */
static void ser_putc(char c)
{
    unsigned port=COM_BASE;

    while ((inb(port+5) & 0x20)==0);
    outb(c,port);
    if (c==0x0a)
    {
        while ((inb(port+5) & 0x20)==0);
        outb(0x0d,port);
    }
}

/* printf()-style formatting into the static strbuf, then serial output.
 * NOTE(review): vsprintf() is unbounded; fmt expansions longer than
 * MAX_SERBUF would overflow strbuf (debug-only code path). */
static int ser_printk(const char *fmt, ...)
{
    va_list args;
    int i;

    va_start(args,fmt);
    i = vsprintf(strbuf,fmt,args);
    ser_puts(strbuf);
    va_end(args);
    return i;
}

/* Trace macros: level 1 = verbose, 2 = important, 3 = everything != 0. */
#define TRACE(a)    {if (DebugState==1) {ser_printk a;}}
#define TRACE2(a)   {if (DebugState==1 || DebugState==2) {ser_printk a;}}
#define TRACE3(a)   {if (DebugState!=0) {ser_printk a;}}

#else /* !__SERIAL__ */
#define TRACE(a)    {if (DebugState==1) {printk a;}}
#define TRACE2(a)   {if (DebugState==1 || DebugState==2) {printk a;}}
#define TRACE3(a)   {if (DebugState!=0) {printk a;}}
#endif

#else /* !DEBUG */
#define TRACE(a)
#define TRACE2(a)
#define TRACE3(a)
#endif

#ifdef GDTH_STATISTICS
/* Global counters maintained by the I/O and interrupt paths, sampled by the
 * statistics timer below. */
static ulong32 max_rq=0, max_index=0, max_sg=0;
#ifdef INT_COAL
static ulong32 max_int_coal=0;
#endif
static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
static struct timer_list gdth_timer;
#endif

#define PTR2USHORT(a)   (ushort)(ulong)(a)
#define GDTOFFSOF(a,b)  (size_t)&(((a*)0)->b)   /* offsetof() equivalent */
#define INDEX_OK(i,t)   ((i)<ARRAY_SIZE(t))

/* Accessors for the per-host private data hung off Scsi_Host::hostdata. */
#define NUMDATA(a)  ( (gdth_num_str *)((a)->hostdata))
#define HADATA(a)   (&((gdth_ext_str *)((a)->hostdata))->haext)
#define CMDDATA(a)  (&((gdth_ext_str *)((a)->hostdata))->cmdext)

/* Map a logical bus number to the physical one, skipping the virtual bus. */
#define BUS_L2P(a,b)    ((b)>(a)->virt_bus ? (b-1):(b))

#define gdth_readb(addr)        readb(addr)
#define gdth_readw(addr)        readw(addr)
#define gdth_readl(addr)        readl(addr)
#define gdth_writeb(b,addr)     writeb((b),(addr))
#define gdth_writew(b,addr)     writew((b),(addr))
#define gdth_writel(b,addr)     writel((b),(addr))

static unchar   gdth_drq_tab[4] = {5,6,7,7};            /* DRQ table */
static unchar   gdth_irq_tab[6] = {0,10,11,12,14,0};    /* IRQ table */
static unchar   gdth_polling;                           /* polling if TRUE */
static unchar   gdth_from_wait = FALSE;                 /* gdth_wait() */
static int      wait_index,wait_hanum;                  /* gdth_wait() */
static int      gdth_ctr_count  = 0;                    /* controller count */
static int      gdth_ctr_vcount = 0;                    /* virt.
ctr. count */ 597static int gdth_ctr_released = 0; /* gdth_release() */ 598static struct Scsi_Host *gdth_ctr_tab[MAXHA]; /* controller table */ 599static struct Scsi_Host *gdth_ctr_vtab[MAXHA*MAXBUS]; /* virt. ctr. table */ 600static unchar gdth_write_through = FALSE; /* write through */ 601static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */ 602static int elastidx; 603static int eoldidx; 604static int major; 605 606#define DIN 1 /* IN data direction */ 607#define DOU 2 /* OUT data direction */ 608#define DNO DIN /* no data transfer */ 609#define DUN DIN /* unknown data direction */ 610static unchar gdth_direction_tab[0x100] = { 611 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN, 612 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN, 613 DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU, 614 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU, 615 DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN, 616 DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN, 617 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, 618 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, 619 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN, 620 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN, 621 DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU, 622 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, 623 DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, 624 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN, 625 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN, 626 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN 627}; 628 629/* LILO and modprobe/insmod parameters */ 630/* IRQ list for GDT3000/3020 EISA controllers */ 631static int irq[MAXHA] __initdata = 632{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 633 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; 634/* disable 
driver flag */ 635static int disable __initdata = 0; 636/* reserve flag */ 637static int reserve_mode = 1; 638/* reserve list */ 639static int reserve_list[MAX_RES_ARGS] = 640{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 641 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 642 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; 643/* scan order for PCI controllers */ 644static int reverse_scan = 0; 645/* virtual channel for the host drives */ 646static int hdr_channel = 0; 647/* max. IDs per channel */ 648static int max_ids = MAXID; 649/* rescan all IDs */ 650static int rescan = 0; 651/* map channels to virtual controllers */ 652static int virt_ctr = 0; 653/* shared access */ 654static int shared_access = 1; 655/* enable support for EISA and ISA controllers */ 656static int probe_eisa_isa = 0; 657/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */ 658static int force_dma32 = 0; 659 660/* parameters for modprobe/insmod */ 661#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) 662module_param_array(irq, int, NULL, 0); 663module_param(disable, int, 0); 664module_param(reserve_mode, int, 0); 665module_param_array(reserve_list, int, NULL, 0); 666module_param(reverse_scan, int, 0); 667module_param(hdr_channel, int, 0); 668module_param(max_ids, int, 0); 669module_param(rescan, int, 0); 670module_param(virt_ctr, int, 0); 671module_param(shared_access, int, 0); 672module_param(probe_eisa_isa, int, 0); 673module_param(force_dma32, int, 0); 674#else 675MODULE_PARM(irq, "i"); 676MODULE_PARM(disable, "i"); 677MODULE_PARM(reserve_mode, "i"); 678MODULE_PARM(reserve_list, "4-" __MODULE_STRING(MAX_RES_ARGS) "i"); 679MODULE_PARM(reverse_scan, "i"); 680MODULE_PARM(hdr_channel, "i"); 681MODULE_PARM(max_ids, "i"); 682MODULE_PARM(rescan, "i"); 683MODULE_PARM(virt_ctr, "i"); 684MODULE_PARM(shared_access, "i"); 685MODULE_PARM(probe_eisa_isa, "i"); 686MODULE_PARM(force_dma32, "i"); 687#endif 688MODULE_AUTHOR("Achim 
Leubner");
MODULE_LICENSE("GPL");

/* ioctl interface: character-device entry points for the gdth tool access */
static const struct file_operations gdth_fops = {
    .ioctl   = gdth_ioctl,
    .open    = gdth_open,
    .release = gdth_close,
};

#include "gdth_proc.h"
#include "gdth_proc.c"

/* notifier block to get a notify on system shutdown/halt/reboot */
static struct notifier_block gdth_notifier = {
    gdth_halt, NULL, 0
};
static int notifier_disabled = 0;

/* Delay for the given number of milliseconds; 0 means a minimal 1us delay. */
static void gdth_delay(int milliseconds)
{
    if (milliseconds == 0) {
        udelay(1);
    } else {
        mdelay(milliseconds);
    }
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Completion callback for internal commands: the completion struct was
 * smuggled through scp->request by __gdth_execute() below. */
static void gdth_scsi_done(struct scsi_cmnd *scp)
{
    TRACE2(("gdth_scsi_done()\n"));

    if (scp->request)
        complete((struct completion *)scp->request);
}

/* Build a synthetic Scsi_Cmnd around an internal controller command
 * (gdtcmd/cmnd), queue it and sleep until gdth_scsi_done() fires.
 * Returns the command status from SCp.Status; *info (if non-NULL) receives
 * the additional status word from SCp.Message.  Returns -ENOMEM if the
 * command structure cannot be allocated. */
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
                   int timeout, u32 *info)
{
    Scsi_Cmnd *scp;
    DECLARE_COMPLETION_ONSTACK(wait);
    int rval;

    scp = kmalloc(sizeof(*scp), GFP_KERNEL);
    if (!scp)
        return -ENOMEM;
    memset(scp, 0, sizeof(*scp));
    scp->device = sdev;
    /* use request field to save the ptr. to completion struct. */
    scp->request = (struct request *)&wait;
    scp->timeout_per_command = timeout*HZ;
    scp->request_buffer = gdtcmd;
    scp->cmd_len = 12;
    memcpy(scp->cmnd, cmnd, 12);
    scp->SCp.this_residual = IOCTL_PRI;   /* priority */
    scp->done = gdth_scsi_done; /* some fn. test this */
    gdth_queuecommand(scp, gdth_scsi_done);
    wait_for_completion(&wait);

    rval = scp->SCp.Status;
    if (info)
        *info = scp->SCp.Message;
    kfree(scp);
    return rval;
}
#else
/* 2.4.x variant: signal completion through the request's waiting pointer. */
static void gdth_scsi_done(Scsi_Cmnd *scp)
{
    TRACE2(("gdth_scsi_done()\n"));

    scp->request.rq_status = RQ_SCSI_DONE;
    if (scp->request.waiting)
        complete(scp->request.waiting);
}

/* 2.4.x variant of the internal-command executor; same contract as above
 * but using scsi_allocate_device()/scsi_do_cmd(). */
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
                   int timeout, u32 *info)
{
    Scsi_Cmnd *scp = scsi_allocate_device(sdev, 1, FALSE);
    unsigned bufflen = gdtcmd ? sizeof(gdth_cmd_str) : 0;
    DECLARE_COMPLETION_ONSTACK(wait);
    int rval;

    if (!scp)
        return -ENOMEM;
    scp->cmd_len = 12;
    scp->use_sg = 0;
    scp->SCp.this_residual = IOCTL_PRI;   /* priority */
    scp->request.rq_status = RQ_SCSI_BUSY;
    scp->request.waiting = &wait;
    scsi_do_cmd(scp, cmnd, gdtcmd, bufflen, gdth_scsi_done, timeout*HZ, 1);
    wait_for_completion(&wait);

    rval = scp->SCp.Status;
    if (info)
        *info = scp->SCp.Message;

    scsi_release_command(scp);
    return rval;
}
#endif

/* Public wrapper: run an internal command against a host, using a temporary
 * scsi_device obtained from the midlayer. */
int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
                 int timeout, u32 *info)
{
    struct scsi_device *sdev = scsi_get_host_dev(shost);
    int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);

    scsi_free_host_dev(sdev);
    return rval;
}

/* Derive a fake CHS geometry (cyls/heads/secs) from a drive size in sectors.
 * Tries 64*32 first, then 127*63, then the "big" geometry, picking the first
 * whose cylinder count fits in MAXCYLS. */
static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs)
{
    *cyls = size /HEADS/SECS;
    if (*cyls <= MAXCYLS) {
        *heads = HEADS;
        *secs = SECS;
    } else {                        /* too high for 64*32 */
        *cyls = size /MEDHEADS/MEDSECS;
        if (*cyls <= MAXCYLS) {
            *heads = MEDHEADS;
            *secs = MEDSECS;
        } else {                    /* too high for 127*63 */
            *cyls = size /BIGHEADS/BIGSECS;
            *heads = BIGHEADS;
            *secs = BIGSECS;
        }
    }
}

/* controller search and initialization functions */
821 822static int __init gdth_search_eisa(ushort eisa_adr) 823{ 824 ulong32 id; 825 826 TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr)); 827 id = inl(eisa_adr+ID0REG); 828 if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */ 829 if ((inb(eisa_adr+EISAREG) & 8) == 0) 830 return 0; /* not EISA configured */ 831 return 1; 832 } 833 if (id == GDT3_ID) /* GDT3000 */ 834 return 1; 835 836 return 0; 837} 838 839 840static int __init gdth_search_isa(ulong32 bios_adr) 841{ 842 void __iomem *addr; 843 ulong32 id; 844 845 TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr)); 846 if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) { 847 id = gdth_readl(addr); 848 iounmap(addr); 849 if (id == GDT2_ID) /* GDT2000 */ 850 return 1; 851 } 852 return 0; 853} 854 855 856static int __init gdth_search_pci(gdth_pci_str *pcistr) 857{ 858 ushort device, cnt; 859 860 TRACE(("gdth_search_pci()\n")); 861 862 cnt = 0; 863 for (device = 0; device <= PCI_DEVICE_ID_VORTEX_GDT6555; ++device) 864 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device); 865 for (device = PCI_DEVICE_ID_VORTEX_GDT6x17RP; 866 device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device) 867 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device); 868 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, 869 PCI_DEVICE_ID_VORTEX_GDTNEWRX); 870 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, 871 PCI_DEVICE_ID_VORTEX_GDTNEWRX2); 872 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL, 873 PCI_DEVICE_ID_INTEL_SRC); 874 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL, 875 PCI_DEVICE_ID_INTEL_SRC_XSCALE); 876 return cnt; 877} 878 879/* Vortex only makes RAID controllers. 880 * We do not really want to specify all 550 ids here, so wildcard match. 
881 */ 882static struct pci_device_id gdthtable[] __attribute_used__ = { 883 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID}, 884 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID}, 885 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID}, 886 {0} 887}; 888MODULE_DEVICE_TABLE(pci,gdthtable); 889 890static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, 891 ushort vendor, ushort device) 892{ 893 ulong base0, base1, base2; 894 struct pci_dev *pdev; 895 896 TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n", 897 *cnt, vendor, device)); 898 899 pdev = NULL; 900 while ((pdev = pci_find_device(vendor, device, pdev)) 901 != NULL) { 902 if (pci_enable_device(pdev)) 903 continue; 904 if (*cnt >= MAXHA) 905 return; 906 /* GDT PCI controller found, resources are already in pdev */ 907 pcistr[*cnt].pdev = pdev; 908 pcistr[*cnt].vendor_id = vendor; 909 pcistr[*cnt].device_id = device; 910 pcistr[*cnt].subdevice_id = pdev->subsystem_device; 911 pcistr[*cnt].bus = pdev->bus->number; 912 pcistr[*cnt].device_fn = pdev->devfn; 913 pcistr[*cnt].irq = pdev->irq; 914 base0 = pci_resource_flags(pdev, 0); 915 base1 = pci_resource_flags(pdev, 1); 916 base2 = pci_resource_flags(pdev, 2); 917 if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */ 918 device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */ 919 if (!(base0 & IORESOURCE_MEM)) 920 continue; 921 pcistr[*cnt].dpmem = pci_resource_start(pdev, 0); 922 } else { /* GDT6110, GDT6120, .. 
*/
            /* three-BAR layout: BAR0 = chip regs (mem), BAR1 = I/O,
             * BAR2 = DPMEM window */
            if (!(base0 & IORESOURCE_MEM) ||
                !(base2 & IORESOURCE_MEM) ||
                !(base1 & IORESOURCE_IO))
                continue;
            pcistr[*cnt].dpmem = pci_resource_start(pdev, 2);
            pcistr[*cnt].io_mm = pci_resource_start(pdev, 0);
            pcistr[*cnt].io    = pci_resource_start(pdev, 1);
        }
        TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
                pcistr[*cnt].bus, PCI_SLOT(pcistr[*cnt].device_fn),
                pcistr[*cnt].irq, pcistr[*cnt].dpmem));
        (*cnt)++;
    }
}


/*
 * Bubble-sort the detected PCI controllers by (bus, slot); with the
 * reverse_scan module parameter set, the ordering is inverted.
 */
static void __init gdth_sort_pci(gdth_pci_str *pcistr, int cnt)
{
    gdth_pci_str temp;
    int i, changed;

    TRACE(("gdth_sort_pci() cnt %d\n",cnt));
    if (cnt == 0)
        return;

    do {
        changed = FALSE;
        for (i = 0; i < cnt-1; ++i) {
            if (!reverse_scan) {
                if ((pcistr[i].bus > pcistr[i+1].bus) ||
                    (pcistr[i].bus == pcistr[i+1].bus &&
                     PCI_SLOT(pcistr[i].device_fn) >
                     PCI_SLOT(pcistr[i+1].device_fn))) {
                    temp = pcistr[i];
                    pcistr[i] = pcistr[i+1];
                    pcistr[i+1] = temp;
                    changed = TRUE;
                }
            } else {
                if ((pcistr[i].bus < pcistr[i+1].bus) ||
                    (pcistr[i].bus == pcistr[i+1].bus &&
                     PCI_SLOT(pcistr[i].device_fn) <
                     PCI_SLOT(pcistr[i+1].device_fn))) {
                    temp = pcistr[i];
                    pcistr[i] = pcistr[i+1];
                    pcistr[i+1] = temp;
                    changed = TRUE;
                }
            }
        }
    } while (changed);
}


/*
 * Bring up an EISA controller: deinitialize the firmware services,
 * check the protocol version and determine the board IRQ.
 * Returns 1 on success, 0 on any initialization error.
 */
static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
{
    ulong32 retries,id;
    unchar prot_ver,eisacf,i,irq_found;

    TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));

    /* disable board interrupts, deinitialize services */
    outb(0xff,eisa_adr+EDOORREG);
    outb(0x00,eisa_adr+EDENABREG);
    outb(0x00,eisa_adr+EINTENABREG);

    outb(0xff,eisa_adr+LDOORREG);
    retries = INIT_RETRIES;
    gdth_delay(20);
    /* wait for the firmware to acknowledge the DEINIT request */
    while (inb(eisa_adr+EDOORREG) != 0xff) {
        if (--retries == 0) {
            printk("GDT-EISA: Initialization error (DEINIT failed)\n");
            return 0;
        }
        gdth_delay(1);
        TRACE2(("wait for DEINIT: retries=%d\n",retries));
    }
    prot_ver = inb(eisa_adr+MAILBOXREG);
    outb(0xff,eisa_adr+EDOORREG);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-EISA: Illegal protocol version\n");
        return 0;
    }
    ha->bmic = eisa_adr;
    ha->brd_phys = (ulong32)eisa_adr >> 12;

    outl(0,eisa_adr+MAILBOXREG);
    outl(0,eisa_adr+MAILBOXREG+4);
    outl(0,eisa_adr+MAILBOXREG+8);
    outl(0,eisa_adr+MAILBOXREG+12);

    /* detect IRQ */
    if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
        /* GDT3000: ask the firmware for the IRQ via mailbox cmd 0xfe */
        outl(1,eisa_adr+MAILBOXREG+8);
        outb(0xfe,eisa_adr+LDOORREG);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (inb(eisa_adr+EDOORREG) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-EISA: Initialization error (get IRQ failed)\n");
                return 0;
            }
            gdth_delay(1);
        }
        ha->irq = inb(eisa_adr+MAILBOXREG);
        outb(0xff,eisa_adr+EDOORREG);
        TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
        /* check the result */
        if (ha->irq == 0) {
            TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
            /* fall back to the first plausible IRQ from the irq[]
             * command-line table */
            for (i = 0, irq_found = FALSE;
                 i < MAXHA && irq[i] != 0xff; ++i) {
                if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
                    irq_found = TRUE;
                    break;
                }
            }
            if (irq_found) {
                ha->irq = irq[i];
                irq[i] = 0;
                printk("GDT-EISA: Can not detect controller IRQ,\n");
                printk("Use IRQ setting from command line (IRQ = %d)\n",
                       ha->irq);
            } else {
                printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
                printk("the controller BIOS or use command line parameters\n");
                return 0;
            }
        }
    } else {
        /* GDT3000A/B: IRQ is encoded in the EISA configuration register */
        eisacf = inb(eisa_adr+EISAREG) & 7;
        if (eisacf > 4)                         /* level triggered */
            eisacf -= 4;
        ha->irq = gdth_irq_tab[eisacf];
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
    }

    ha->dma64_support = 0;
    return 1;
}


/*
 * Bring up an ISA controller through its dual-ported RAM: read the
 * DRQ/IRQ settings, deinitialize services and check the protocol
 * version.  Returns 1 on success, 0 on any initialization error.
 */
static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
{
    register gdt2_dpram_str __iomem *dp2_ptr;
    int i;
    unchar irq_drq,prot_ver;
    ulong32 retries;

    TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));

    ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
    if (ha->brd == NULL) {
        printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
        return 0;
    }
    dp2_ptr = ha->brd;
    gdth_writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
    /* reset interface area */
    memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
    if (gdth_readl(&dp2_ptr->u) != 0) {
        printk("GDT-ISA: Initialization error (DPMEM write error)\n");
        iounmap(ha->brd);
        return 0;
    }

    /* disable board interrupts, read DRQ and IRQ */
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);
    gdth_writeb(0x00, &dp2_ptr->io.irqen);
    gdth_writeb(0x00, &dp2_ptr->u.ic.S_Status);
    gdth_writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);

    /* low 3 bits of io.rq select the DRQ (first zero bit wins) */
    irq_drq = gdth_readb(&dp2_ptr->io.rq);
    for (i=0; i<3; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->drq = gdth_drq_tab[i];

    /* bits 3.. of io.rq select the IRQ the same way */
    irq_drq = gdth_readb(&dp2_ptr->io.rq) >> 3;
    for (i=1; i<5; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->irq = gdth_irq_tab[i];

    /* deinitialize services */
    gdth_writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
    gdth_writeb(0xff,
&dp2_ptr->u.ic.S_Cmd_Indx);
    gdth_writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    /* wait for the firmware to acknowledge the DEINIT request (0xff) */
    while (gdth_readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error (DEINIT failed)\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    prot_ver = (unchar)gdth_readl(&dp2_ptr->u.ic.S_Info[0]);
    gdth_writeb(0, &dp2_ptr->u.ic.Status);
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-ISA: Illegal protocol version\n");
        iounmap(ha->brd);
        return 0;
    }

    ha->oem_id = OEM_ID_ICP;
    ha->type = GDT_ISA;
    ha->ic_all_size = sizeof(dp2_ptr->u);
    ha->stype= GDT2_ID;
    ha->brd_phys = bios_adr >> 4;

    /* special request to controller BIOS */
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
    gdth_writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
    gdth_writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
    gdth_writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (gdth_readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    gdth_writeb(0, &dp2_ptr->u.ic.Status);
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);

    ha->dma64_support = 0;
    return 1;
}


/*
 * Bring up a PCI controller.  Three hardware generations are handled:
 * GDT6000/B (single DPMEM BAR), GDT6110..GDT6555 (PLX bridge) and the
 * MPR/i960-based boards.  Returns 1 on success, 0 on error.
 */
static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha)
{
    register gdt6_dpram_str __iomem *dp6_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    ulong32 retries;
    unchar prot_ver;
    ushort command;
    int i, found = FALSE;

    TRACE(("gdth_init_pci()\n"));

    if (pcistr->vendor_id == PCI_VENDOR_ID_INTEL)
        ha->oem_id = OEM_ID_INTEL;
    else
        ha->oem_id = OEM_ID_ICP;
    ha->brd_phys = (pcistr->bus << 8) | (pcistr->device_fn & 0xf8);
    ha->stype = (ulong32)pcistr->device_id;
    ha->subdevice_id = pcistr->subdevice_id;
    ha->irq = pcistr->irq;
    ha->pdev = pcistr->pdev;

    if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6000B) {   /* GDT6000/B */
        TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }
        /* check and reset interface area */
        dp6_ptr = ha->brd;
        gdth_writel(DPMEM_MAGIC, &dp6_ptr->u);
        if (gdth_readl(&dp6_ptr->u) != DPMEM_MAGIC) {
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            /* try to relocate DPMEM into a free legacy ISA window */
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(ushort));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (gdth_readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_old() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pcistr->pdev,
                                       PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6_ptr = ha->brd;
                gdth_writel(DPMEM_MAGIC, &dp6_ptr->u);
                if (gdth_readl(&dp6_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
        if (gdth_readl(&dp6_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);
        gdth_writeb(0x00, &dp6_ptr->io.irqen);
        gdth_writeb(0x00, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);

        gdth_writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
        gdth_writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (unchar)gdth_readl(&dp6_ptr->u.ic.S_Info[0]);
        gdth_writeb(0, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCI;
        ha->ic_all_size = sizeof(dp6_ptr->u);

        /* special command to controller BIOS */
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
        gdth_writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        gdth_writeb(0, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);

        ha->dma64_support = 0;

    } else if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6555) {  /* GDT6110, ...
*/ 1300 ha->plx = (gdt6c_plx_regs *)pcistr->io; 1301 TRACE2(("init_pci_new() dpmem %lx irq %d\n", 1302 pcistr->dpmem,ha->irq)); 1303 ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str)); 1304 if (ha->brd == NULL) { 1305 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1306 iounmap(ha->brd); 1307 return 0; 1308 } 1309 /* check and reset interface area */ 1310 dp6c_ptr = ha->brd; 1311 gdth_writel(DPMEM_MAGIC, &dp6c_ptr->u); 1312 if (gdth_readl(&dp6c_ptr->u) != DPMEM_MAGIC) { 1313 printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n", 1314 pcistr->dpmem); 1315 found = FALSE; 1316 for (i = 0xC8000; i < 0xE8000; i += 0x4000) { 1317 iounmap(ha->brd); 1318 ha->brd = ioremap(i, sizeof(ushort)); 1319 if (ha->brd == NULL) { 1320 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1321 return 0; 1322 } 1323 if (gdth_readw(ha->brd) != 0xffff) { 1324 TRACE2(("init_pci_plx() address 0x%x busy\n", i)); 1325 continue; 1326 } 1327 iounmap(ha->brd); 1328 pci_write_config_dword(pcistr->pdev, 1329 PCI_BASE_ADDRESS_2, i); 1330 ha->brd = ioremap(i, sizeof(gdt6c_dpram_str)); 1331 if (ha->brd == NULL) { 1332 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1333 return 0; 1334 } 1335 dp6c_ptr = ha->brd; 1336 gdth_writel(DPMEM_MAGIC, &dp6c_ptr->u); 1337 if (gdth_readl(&dp6c_ptr->u) == DPMEM_MAGIC) { 1338 printk("GDT-PCI: Use free address at 0x%x\n", i); 1339 found = TRUE; 1340 break; 1341 } 1342 } 1343 if (!found) { 1344 printk("GDT-PCI: No free address found!\n"); 1345 iounmap(ha->brd); 1346 return 0; 1347 } 1348 } 1349 memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u)); 1350 if (gdth_readl(&dp6c_ptr->u) != 0) { 1351 printk("GDT-PCI: Initialization error (DPMEM write error)\n"); 1352 iounmap(ha->brd); 1353 return 0; 1354 } 1355 1356 /* disable board interrupts, deinit services */ 1357 outb(0x00,PTR2USHORT(&ha->plx->control1)); 1358 outb(0xff,PTR2USHORT(&ha->plx->edoor_reg)); 1359 1360 gdth_writeb(0x00, &dp6c_ptr->u.ic.S_Status); 1361 
gdth_writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index); 1362 1363 gdth_writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]); 1364 gdth_writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx); 1365 1366 outb(1,PTR2USHORT(&ha->plx->ldoor_reg)); 1367 1368 retries = INIT_RETRIES; 1369 gdth_delay(20); 1370 while (gdth_readb(&dp6c_ptr->u.ic.S_Status) != 0xff) { 1371 if (--retries == 0) { 1372 printk("GDT-PCI: Initialization error (DEINIT failed)\n"); 1373 iounmap(ha->brd); 1374 return 0; 1375 } 1376 gdth_delay(1); 1377 } 1378 prot_ver = (unchar)gdth_readl(&dp6c_ptr->u.ic.S_Info[0]); 1379 gdth_writeb(0, &dp6c_ptr->u.ic.Status); 1380 if (prot_ver != PROTOCOL_VERSION) { 1381 printk("GDT-PCI: Illegal protocol version\n"); 1382 iounmap(ha->brd); 1383 return 0; 1384 } 1385 1386 ha->type = GDT_PCINEW; 1387 ha->ic_all_size = sizeof(dp6c_ptr->u); 1388 1389 /* special command to controller BIOS */ 1390 gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[0]); 1391 gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[1]); 1392 gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[2]); 1393 gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[3]); 1394 gdth_writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx); 1395 1396 outb(1,PTR2USHORT(&ha->plx->ldoor_reg)); 1397 1398 retries = INIT_RETRIES; 1399 gdth_delay(20); 1400 while (gdth_readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) { 1401 if (--retries == 0) { 1402 printk("GDT-PCI: Initialization error\n"); 1403 iounmap(ha->brd); 1404 return 0; 1405 } 1406 gdth_delay(1); 1407 } 1408 gdth_writeb(0, &dp6c_ptr->u.ic.S_Status); 1409 1410 ha->dma64_support = 0; 1411 1412 } else { /* MPR */ 1413 TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq)); 1414 ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str)); 1415 if (ha->brd == NULL) { 1416 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1417 return 0; 1418 } 1419 1420 /* manipulate config. 
space to enable DPMEM, start RP controller */ 1421 pci_read_config_word(pcistr->pdev, PCI_COMMAND, &command); 1422 command |= 6; 1423 pci_write_config_word(pcistr->pdev, PCI_COMMAND, command); 1424 if (pci_resource_start(pcistr->pdev, 8) == 1UL) 1425 pci_resource_start(pcistr->pdev, 8) = 0UL; 1426 i = 0xFEFF0001UL; 1427 pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, i); 1428 gdth_delay(1); 1429 pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, 1430 pci_resource_start(pcistr->pdev, 8)); 1431 1432 dp6m_ptr = ha->brd; 1433 1434 /* Ensure that it is safe to access the non HW portions of DPMEM. 1435 * Aditional check needed for Xscale based RAID controllers */ 1436 while( ((int)gdth_readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 ) 1437 gdth_delay(1); 1438 1439 /* check and reset interface area */ 1440 gdth_writel(DPMEM_MAGIC, &dp6m_ptr->u); 1441 if (gdth_readl(&dp6m_ptr->u) != DPMEM_MAGIC) { 1442 printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n", 1443 pcistr->dpmem); 1444 found = FALSE; 1445 for (i = 0xC8000; i < 0xE8000; i += 0x4000) { 1446 iounmap(ha->brd); 1447 ha->brd = ioremap(i, sizeof(ushort)); 1448 if (ha->brd == NULL) { 1449 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1450 return 0; 1451 } 1452 if (gdth_readw(ha->brd) != 0xffff) { 1453 TRACE2(("init_pci_mpr() address 0x%x busy\n", i)); 1454 continue; 1455 } 1456 iounmap(ha->brd); 1457 pci_write_config_dword(pcistr->pdev, 1458 PCI_BASE_ADDRESS_0, i); 1459 ha->brd = ioremap(i, sizeof(gdt6m_dpram_str)); 1460 if (ha->brd == NULL) { 1461 printk("GDT-PCI: Initialization error (DPMEM remap error)\n"); 1462 return 0; 1463 } 1464 dp6m_ptr = ha->brd; 1465 gdth_writel(DPMEM_MAGIC, &dp6m_ptr->u); 1466 if (gdth_readl(&dp6m_ptr->u) == DPMEM_MAGIC) { 1467 printk("GDT-PCI: Use free address at 0x%x\n", i); 1468 found = TRUE; 1469 break; 1470 } 1471 } 1472 if (!found) { 1473 printk("GDT-PCI: No free address found!\n"); 1474 iounmap(ha->brd); 1475 return 0; 1476 } 1477 } 1478 
memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u)); 1479 1480 /* disable board interrupts, deinit services */ 1481 gdth_writeb(gdth_readb(&dp6m_ptr->i960r.edoor_en_reg) | 4, 1482 &dp6m_ptr->i960r.edoor_en_reg); 1483 gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg); 1484 gdth_writeb(0x00, &dp6m_ptr->u.ic.S_Status); 1485 gdth_writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index); 1486 1487 gdth_writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]); 1488 gdth_writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx); 1489 gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg); 1490 retries = INIT_RETRIES; 1491 gdth_delay(20); 1492 while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xff) { 1493 if (--retries == 0) { 1494 printk("GDT-PCI: Initialization error (DEINIT failed)\n"); 1495 iounmap(ha->brd); 1496 return 0; 1497 } 1498 gdth_delay(1); 1499 } 1500 prot_ver = (unchar)gdth_readl(&dp6m_ptr->u.ic.S_Info[0]); 1501 gdth_writeb(0, &dp6m_ptr->u.ic.S_Status); 1502 if (prot_ver != PROTOCOL_VERSION) { 1503 printk("GDT-PCI: Illegal protocol version\n"); 1504 iounmap(ha->brd); 1505 return 0; 1506 } 1507 1508 ha->type = GDT_PCIMPR; 1509 ha->ic_all_size = sizeof(dp6m_ptr->u); 1510 1511 /* special command to controller BIOS */ 1512 gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[0]); 1513 gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[1]); 1514 gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[2]); 1515 gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[3]); 1516 gdth_writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx); 1517 gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg); 1518 retries = INIT_RETRIES; 1519 gdth_delay(20); 1520 while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) { 1521 if (--retries == 0) { 1522 printk("GDT-PCI: Initialization error\n"); 1523 iounmap(ha->brd); 1524 return 0; 1525 } 1526 gdth_delay(1); 1527 } 1528 gdth_writeb(0, &dp6m_ptr->u.ic.S_Status); 1529 1530 /* read FW version to detect 64-bit DMA support */ 1531 gdth_writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx); 1532 gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg); 1533 retries = INIT_RETRIES; 1534 gdth_delay(20); 
1535 while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) { 1536 if (--retries == 0) { 1537 printk("GDT-PCI: Initialization error (DEINIT failed)\n"); 1538 iounmap(ha->brd); 1539 return 0; 1540 } 1541 gdth_delay(1); 1542 } 1543 prot_ver = (unchar)(gdth_readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16); 1544 gdth_writeb(0, &dp6m_ptr->u.ic.S_Status); 1545 if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */ 1546 ha->dma64_support = 0; 1547 else 1548 ha->dma64_support = 1; 1549 } 1550 1551 return 1; 1552} 1553 1554 1555/* controller protocol functions */ 1556 1557static void __init gdth_enable_int(int hanum) 1558{ 1559 gdth_ha_str *ha; 1560 ulong flags; 1561 gdt2_dpram_str __iomem *dp2_ptr; 1562 gdt6_dpram_str __iomem *dp6_ptr; 1563 gdt6m_dpram_str __iomem *dp6m_ptr; 1564 1565 TRACE(("gdth_enable_int() hanum %d\n",hanum)); 1566 ha = HADATA(gdth_ctr_tab[hanum]); 1567 spin_lock_irqsave(&ha->smp_lock, flags); 1568 1569 if (ha->type == GDT_EISA) { 1570 outb(0xff, ha->bmic + EDOORREG); 1571 outb(0xff, ha->bmic + EDENABREG); 1572 outb(0x01, ha->bmic + EINTENABREG); 1573 } else if (ha->type == GDT_ISA) { 1574 dp2_ptr = ha->brd; 1575 gdth_writeb(1, &dp2_ptr->io.irqdel); 1576 gdth_writeb(0, &dp2_ptr->u.ic.Cmd_Index); 1577 gdth_writeb(1, &dp2_ptr->io.irqen); 1578 } else if (ha->type == GDT_PCI) { 1579 dp6_ptr = ha->brd; 1580 gdth_writeb(1, &dp6_ptr->io.irqdel); 1581 gdth_writeb(0, &dp6_ptr->u.ic.Cmd_Index); 1582 gdth_writeb(1, &dp6_ptr->io.irqen); 1583 } else if (ha->type == GDT_PCINEW) { 1584 outb(0xff, PTR2USHORT(&ha->plx->edoor_reg)); 1585 outb(0x03, PTR2USHORT(&ha->plx->control1)); 1586 } else if (ha->type == GDT_PCIMPR) { 1587 dp6m_ptr = ha->brd; 1588 gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg); 1589 gdth_writeb(gdth_readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4, 1590 &dp6m_ptr->i960r.edoor_en_reg); 1591 } 1592 spin_unlock_irqrestore(&ha->smp_lock, flags); 1593} 1594 1595 1596static int gdth_get_status(unchar *pIStatus,int irq) 1597{ 1598 register gdth_ha_str *ha; 1599 int i; 

    TRACE(("gdth_get_status() irq %d ctr_count %d\n",
           irq,gdth_ctr_count));

    *pIStatus = 0;
    for (i=0; i<gdth_ctr_count; ++i) {
        ha = HADATA(gdth_ctr_tab[i]);
        if (ha->irq != (unchar)irq)             /* check IRQ */
            continue;
        if (ha->type == GDT_EISA)
            *pIStatus = inb((ushort)ha->bmic + EDOORREG);
        else if (ha->type == GDT_ISA)
            *pIStatus =
                gdth_readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
        else if (ha->type == GDT_PCI)
            *pIStatus =
                gdth_readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
        else if (ha->type == GDT_PCINEW)
            *pIStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
        else if (ha->type == GDT_PCIMPR)
            *pIStatus =
                gdth_readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);

        if (*pIStatus)
            return i;                           /* board found */
    }
    return -1;
}


/*
 * Read the controller's semaphore-0 register; bit 0 set means the
 * command interface is still busy.
 */
static int gdth_test_busy(int hanum)
{
    register gdth_ha_str *ha;
    register int gdtsema0 = 0;

    TRACE(("gdth_test_busy() hanum %d\n",hanum));

    ha = HADATA(gdth_ctr_tab[hanum]);
    if (ha->type == GDT_EISA)
        gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
    else if (ha->type == GDT_ISA)
        gdtsema0 = (int)gdth_readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    else if (ha->type == GDT_PCI)
        gdtsema0 = (int)gdth_readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    else if (ha->type == GDT_PCINEW)
        gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
    else if (ha->type == GDT_PCIMPR)
        gdtsema0 =
            (int)gdth_readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);

    return (gdtsema0 & 1);
}


/*
 * Claim a free slot in ha->cmd_tab for the command currently staged in
 * ha->pccb.  Returns the command index (>= 2, since 0 is "no command")
 * or 0 when the table is full.
 */
static int gdth_get_cmd_index(int hanum)
{
    register gdth_ha_str *ha;
    int i;

    TRACE(("gdth_get_cmd_index() hanum %d\n",hanum));

    ha = HADATA(gdth_ctr_tab[hanum]);
    for (i=0; i<GDTH_MAXCMDS; ++i) {
        if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
            ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
            ha->cmd_tab[i].service = ha->pccb->Service;
            ha->pccb->CommandIndex = (ulong32)i+2;
            return (i+2);
        }
    }
    return 0;
}


/*
 * Set semaphore 0 to mark the command interface busy (per board type).
 */
static void gdth_set_sema0(int hanum)
{
    register gdth_ha_str *ha;

    TRACE(("gdth_set_sema0() hanum %d\n",hanum));

    ha = HADATA(gdth_ctr_tab[hanum]);
    if (ha->type == GDT_EISA) {
        outb(1, ha->bmic + SEMA0REG);
    } else if (ha->type == GDT_ISA) {
        gdth_writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    } else if (ha->type == GDT_PCI) {
        gdth_writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->sema0_reg));
    } else if (ha->type == GDT_PCIMPR) {
        gdth_writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
    }
}


/*
 * Copy the staged command (ha->pccb) into the controller's DPMEM
 * command queue slot and record its offset and service.  EISA boards
 * have no DPMEM, so nothing is copied for them.
 */
static void gdth_copy_command(int hanum)
{
    register gdth_ha_str *ha;
    register gdth_cmd_str *cmd_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt2_dpram_str __iomem *dp2_ptr;
    ushort cp_count,dp_offset,cmd_no;

    TRACE(("gdth_copy_command() hanum %d\n",hanum));

    ha = HADATA(gdth_ctr_tab[hanum]);
    cp_count = ha->cmd_len;
    dp_offset= ha->cmd_offs_dpmem;
    cmd_no = ha->cmd_cnt;
    cmd_ptr = ha->pccb;

    ++ha->cmd_cnt;
    if (ha->type == GDT_EISA)
        return;                                 /* no DPMEM, no copy */

    /* set cpcount dword aligned */
    if (cp_count & 3)
        cp_count += (4 - (cp_count & 3));

    ha->cmd_offs_dpmem += cp_count;

    /* set offset and service, copy command to DPMEM */
    if (ha->type == GDT_ISA) {
        dp2_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCINEW) {
        dp6c_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    }
}


/*
 * Ring the controller's local doorbell to start execution of the
 * command(s) previously copied into DPMEM.
 */
static void gdth_release_event(int hanum)
{
    register gdth_ha_str *ha;

    TRACE(("gdth_release_event() hanum %d\n",hanum));
    ha = HADATA(gdth_ctr_tab[hanum]);

#ifdef GDTH_STATISTICS
    {
        ulong32 i,j;
        /* track the high-water mark of in-use command slots */
        for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
            if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                ++i;
        }
        if (max_index < i) {
            max_index = i;
            TRACE3(("GDT: max_index = %d\n",(ushort)i));
        }
    }
#endif

    if (ha->pccb->OpCode == GDT_INIT)
        ha->pccb->Service |= 0x80;

    if (ha->type == GDT_EISA) {
        if (ha->pccb->OpCode == GDT_INIT)       /* store DMA buffer */
            outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
        outb(ha->pccb->Service, ha->bmic + LDOORREG);
    } else if (ha->type == GDT_ISA) {
        gdth_writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
    } else
if (ha->type == GDT_PCI) { 1787 gdth_writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event); 1788 } else if (ha->type == GDT_PCINEW) { 1789 outb(1, PTR2USHORT(&ha->plx->ldoor_reg)); 1790 } else if (ha->type == GDT_PCIMPR) { 1791 gdth_writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg); 1792 } 1793} 1794 1795 1796static int gdth_wait(int hanum,int index,ulong32 time) 1797{ 1798 gdth_ha_str *ha; 1799 int answer_found = FALSE; 1800 1801 TRACE(("gdth_wait() hanum %d index %d time %d\n",hanum,index,time)); 1802 1803 ha = HADATA(gdth_ctr_tab[hanum]); 1804 if (index == 0) 1805 return 1; /* no wait required */ 1806 1807 gdth_from_wait = TRUE; 1808 do { 1809 gdth_interrupt((int)ha->irq,ha); 1810 if (wait_hanum==hanum && wait_index==index) { 1811 answer_found = TRUE; 1812 break; 1813 } 1814 gdth_delay(1); 1815 } while (--time); 1816 gdth_from_wait = FALSE; 1817 1818 while (gdth_test_busy(hanum)) 1819 gdth_delay(0); 1820 1821 return (answer_found); 1822} 1823 1824 1825static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong32 p1, 1826 ulong64 p2,ulong64 p3) 1827{ 1828 register gdth_ha_str *ha; 1829 register gdth_cmd_str *cmd_ptr; 1830 int retries,index; 1831 1832 TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode)); 1833 1834 ha = HADATA(gdth_ctr_tab[hanum]); 1835 cmd_ptr = ha->pccb; 1836 memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str)); 1837 1838 /* make command */ 1839 for (retries = INIT_RETRIES;;) { 1840 cmd_ptr->Service = service; 1841 cmd_ptr->RequestBuffer = INTERNAL_CMND; 1842 if (!(index=gdth_get_cmd_index(hanum))) { 1843 TRACE(("GDT: No free command index found\n")); 1844 return 0; 1845 } 1846 gdth_set_sema0(hanum); 1847 cmd_ptr->OpCode = opcode; 1848 cmd_ptr->BoardNode = LOCALBOARD; 1849 if (service == CACHESERVICE) { 1850 if (opcode == GDT_IOCTL) { 1851 cmd_ptr->u.ioctl.subfunc = p1; 1852 cmd_ptr->u.ioctl.channel = (ulong32)p2; 1853 cmd_ptr->u.ioctl.param_size = (ushort)p3; 1854 cmd_ptr->u.ioctl.p_param = 
ha->scratch_phys; 1855 } else { 1856 if (ha->cache_feat & GDT_64BIT) { 1857 cmd_ptr->u.cache64.DeviceNo = (ushort)p1; 1858 cmd_ptr->u.cache64.BlockNo = p2; 1859 } else { 1860 cmd_ptr->u.cache.DeviceNo = (ushort)p1; 1861 cmd_ptr->u.cache.BlockNo = (ulong32)p2; 1862 } 1863 } 1864 } else if (service == SCSIRAWSERVICE) { 1865 if (ha->raw_feat & GDT_64BIT) { 1866 cmd_ptr->u.raw64.direction = p1; 1867 cmd_ptr->u.raw64.bus = (unchar)p2; 1868 cmd_ptr->u.raw64.target = (unchar)p3; 1869 cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8); 1870 } else { 1871 cmd_ptr->u.raw.direction = p1; 1872 cmd_ptr->u.raw.bus = (unchar)p2; 1873 cmd_ptr->u.raw.target = (unchar)p3; 1874 cmd_ptr->u.raw.lun = (unchar)(p3 >> 8); 1875 } 1876 } else if (service == SCREENSERVICE) { 1877 if (opcode == GDT_REALTIME) { 1878 *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1; 1879 *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2; 1880 *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3; 1881 } 1882 } 1883 ha->cmd_len = sizeof(gdth_cmd_str); 1884 ha->cmd_offs_dpmem = 0; 1885 ha->cmd_cnt = 0; 1886 gdth_copy_command(hanum); 1887 gdth_release_event(hanum); 1888 gdth_delay(20); 1889 if (!gdth_wait(hanum,index,INIT_TIMEOUT)) { 1890 printk("GDT: Initialization error (timeout service %d)\n",service); 1891 return 0; 1892 } 1893 if (ha->status != S_BSY || --retries == 0) 1894 break; 1895 gdth_delay(1); 1896 } 1897 1898 return (ha->status != S_OK ? 
0:1); 1899} 1900 1901 1902/* search for devices */ 1903 1904static int __init gdth_search_drives(int hanum) 1905{ 1906 register gdth_ha_str *ha; 1907 ushort cdev_cnt, i; 1908 int ok; 1909 ulong32 bus_no, drv_cnt, drv_no, j; 1910 gdth_getch_str *chn; 1911 gdth_drlist_str *drl; 1912 gdth_iochan_str *ioc; 1913 gdth_raw_iochan_str *iocr; 1914 gdth_arcdl_str *alst; 1915 gdth_alist_str *alst2; 1916 gdth_oem_str_ioctl *oemstr; 1917#ifdef INT_COAL 1918 gdth_perf_modes *pmod; 1919#endif 1920 1921#ifdef GDTH_RTC 1922 unchar rtc[12]; 1923 ulong flags; 1924#endif 1925 1926 TRACE(("gdth_search_drives() hanum %d\n",hanum)); 1927 ha = HADATA(gdth_ctr_tab[hanum]); 1928 ok = 0; 1929 1930 /* initialize controller services, at first: screen service */ 1931 ha->screen_feat = 0; 1932 if (!force_dma32) { 1933 ok = gdth_internal_cmd(hanum,SCREENSERVICE,GDT_X_INIT_SCR,0,0,0); 1934 if (ok) 1935 ha->screen_feat = GDT_64BIT; 1936 } 1937 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 1938 ok = gdth_internal_cmd(hanum,SCREENSERVICE,GDT_INIT,0,0,0); 1939 if (!ok) { 1940 printk("GDT-HA %d: Initialization error screen service (code %d)\n", 1941 hanum, ha->status); 1942 return 0; 1943 } 1944 TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n")); 1945 1946#ifdef GDTH_RTC 1947 /* read realtime clock info, send to controller */ 1948 /* 1. wait for the falling edge of update flag */ 1949 spin_lock_irqsave(&rtc_lock, flags); 1950 for (j = 0; j < 1000000; ++j) 1951 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) 1952 break; 1953 for (j = 0; j < 1000000; ++j) 1954 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) 1955 break; 1956 /* 2. read info */ 1957 do { 1958 for (j = 0; j < 12; ++j) 1959 rtc[j] = CMOS_READ(j); 1960 } while (rtc[0] != CMOS_READ(0)); 1961 spin_lock_irqrestore(&rtc_lock, flags); 1962 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], 1963 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); 1964 /* 3. 
send to controller firmware */ 1965 gdth_internal_cmd(hanum,SCREENSERVICE,GDT_REALTIME, *(ulong32 *)&rtc[0], 1966 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]); 1967#endif 1968 1969 /* unfreeze all IOs */ 1970 gdth_internal_cmd(hanum,CACHESERVICE,GDT_UNFREEZE_IO,0,0,0); 1971 1972 /* initialize cache service */ 1973 ha->cache_feat = 0; 1974 if (!force_dma32) { 1975 ok = gdth_internal_cmd(hanum,CACHESERVICE,GDT_X_INIT_HOST,LINUX_OS,0,0); 1976 if (ok) 1977 ha->cache_feat = GDT_64BIT; 1978 } 1979 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 1980 ok = gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0); 1981 if (!ok) { 1982 printk("GDT-HA %d: Initialization error cache service (code %d)\n", 1983 hanum, ha->status); 1984 return 0; 1985 } 1986 TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); 1987 cdev_cnt = (ushort)ha->info; 1988 ha->fw_vers = ha->service; 1989 1990#ifdef INT_COAL 1991 if (ha->type == GDT_PCIMPR) { 1992 /* set perf. modes */ 1993 pmod = (gdth_perf_modes *)ha->pscratch; 1994 pmod->version = 1; 1995 pmod->st_mode = 1; /* enable one status buffer */ 1996 *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; 1997 pmod->st_buff_indx1 = COALINDEX; 1998 pmod->st_buff_addr2 = 0; 1999 pmod->st_buff_u_addr2 = 0; 2000 pmod->st_buff_indx2 = 0; 2001 pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS; 2002 pmod->cmd_mode = 0; // disable all cmd buffers 2003 pmod->cmd_buff_addr1 = 0; 2004 pmod->cmd_buff_u_addr1 = 0; 2005 pmod->cmd_buff_indx1 = 0; 2006 pmod->cmd_buff_addr2 = 0; 2007 pmod->cmd_buff_u_addr2 = 0; 2008 pmod->cmd_buff_indx2 = 0; 2009 pmod->cmd_buff_size = 0; 2010 pmod->reserved1 = 0; 2011 pmod->reserved2 = 0; 2012 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,SET_PERF_MODES, 2013 INVALID_CHANNEL,sizeof(gdth_perf_modes))) { 2014 printk("GDT-HA %d: Interrupt coalescing activated\n", hanum); 2015 } 2016 } 2017#endif 2018 2019 /* detect number of buses - try new IOCTL */ 2020 iocr = (gdth_raw_iochan_str 
*)ha->pscratch; 2021 iocr->hdr.version = 0xffffffff; 2022 iocr->hdr.list_entries = MAXBUS; 2023 iocr->hdr.first_chan = 0; 2024 iocr->hdr.last_chan = MAXBUS-1; 2025 iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]); 2026 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,IOCHAN_RAW_DESC, 2027 INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) { 2028 TRACE2(("IOCHAN_RAW_DESC supported!\n")); 2029 ha->bus_cnt = iocr->hdr.chan_count; 2030 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { 2031 if (iocr->list[bus_no].proc_id < MAXID) 2032 ha->bus_id[bus_no] = iocr->list[bus_no].proc_id; 2033 else 2034 ha->bus_id[bus_no] = 0xff; 2035 } 2036 } else { 2037 /* old method */ 2038 chn = (gdth_getch_str *)ha->pscratch; 2039 for (bus_no = 0; bus_no < MAXBUS; ++bus_no) { 2040 chn->channel_no = bus_no; 2041 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2042 SCSI_CHAN_CNT | L_CTRL_PATTERN, 2043 IO_CHANNEL | INVALID_CHANNEL, 2044 sizeof(gdth_getch_str))) { 2045 if (bus_no == 0) { 2046 printk("GDT-HA %d: Error detecting channel count (0x%x)\n", 2047 hanum, ha->status); 2048 return 0; 2049 } 2050 break; 2051 } 2052 if (chn->siop_id < MAXID) 2053 ha->bus_id[bus_no] = chn->siop_id; 2054 else 2055 ha->bus_id[bus_no] = 0xff; 2056 } 2057 ha->bus_cnt = (unchar)bus_no; 2058 } 2059 TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); 2060 2061 /* read cache configuration */ 2062 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_INFO, 2063 INVALID_CHANNEL,sizeof(gdth_cinfo_str))) { 2064 printk("GDT-HA %d: Initialization error cache service (code %d)\n", 2065 hanum, ha->status); 2066 return 0; 2067 } 2068 ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar; 2069 TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n", 2070 ha->cpar.version,ha->cpar.state,ha->cpar.strategy, 2071 ha->cpar.write_back,ha->cpar.block_size)); 2072 2073 /* read board info and features */ 2074 ha->more_proc = FALSE; 2075 if 
(gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_INFO, 2076 INVALID_CHANNEL,sizeof(gdth_binfo_str))) { 2077 memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch, 2078 sizeof(gdth_binfo_str)); 2079 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_FEATURES, 2080 INVALID_CHANNEL,sizeof(gdth_bfeat_str))) { 2081 TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n")); 2082 ha->bfeat = *(gdth_bfeat_str *)ha->pscratch; 2083 ha->more_proc = TRUE; 2084 } 2085 } else { 2086 TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n")); 2087 strcpy(ha->binfo.type_string, gdth_ctr_name(hanum)); 2088 } 2089 TRACE2(("Controller name: %s\n",ha->binfo.type_string)); 2090 2091 /* read more informations */ 2092 if (ha->more_proc) { 2093 /* physical drives, channel addresses */ 2094 ioc = (gdth_iochan_str *)ha->pscratch; 2095 ioc->hdr.version = 0xffffffff; 2096 ioc->hdr.list_entries = MAXBUS; 2097 ioc->hdr.first_chan = 0; 2098 ioc->hdr.last_chan = MAXBUS-1; 2099 ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]); 2100 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,IOCHAN_DESC, 2101 INVALID_CHANNEL,sizeof(gdth_iochan_str))) { 2102 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { 2103 ha->raw[bus_no].address = ioc->list[bus_no].address; 2104 ha->raw[bus_no].local_no = ioc->list[bus_no].local_no; 2105 } 2106 } else { 2107 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { 2108 ha->raw[bus_no].address = IO_CHANNEL; 2109 ha->raw[bus_no].local_no = bus_no; 2110 } 2111 } 2112 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { 2113 chn = (gdth_getch_str *)ha->pscratch; 2114 chn->channel_no = ha->raw[bus_no].local_no; 2115 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2116 SCSI_CHAN_CNT | L_CTRL_PATTERN, 2117 ha->raw[bus_no].address | INVALID_CHANNEL, 2118 sizeof(gdth_getch_str))) { 2119 ha->raw[bus_no].pdev_cnt = chn->drive_cnt; 2120 TRACE2(("Channel %d: %d phys. 
drives\n", 2121 bus_no,chn->drive_cnt)); 2122 } 2123 if (ha->raw[bus_no].pdev_cnt > 0) { 2124 drl = (gdth_drlist_str *)ha->pscratch; 2125 drl->sc_no = ha->raw[bus_no].local_no; 2126 drl->sc_cnt = ha->raw[bus_no].pdev_cnt; 2127 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2128 SCSI_DR_LIST | L_CTRL_PATTERN, 2129 ha->raw[bus_no].address | INVALID_CHANNEL, 2130 sizeof(gdth_drlist_str))) { 2131 for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j) 2132 ha->raw[bus_no].id_list[j] = drl->sc_list[j]; 2133 } else { 2134 ha->raw[bus_no].pdev_cnt = 0; 2135 } 2136 } 2137 } 2138 2139 /* logical drives */ 2140 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_DRV_CNT, 2141 INVALID_CHANNEL,sizeof(ulong32))) { 2142 drv_cnt = *(ulong32 *)ha->pscratch; 2143 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_DRV_LIST, 2144 INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) { 2145 for (j = 0; j < drv_cnt; ++j) { 2146 drv_no = ((ulong32 *)ha->pscratch)[j]; 2147 if (drv_no < MAX_LDRIVES) { 2148 ha->hdr[drv_no].is_logdrv = TRUE; 2149 TRACE2(("Drive %d is log. 
drive\n",drv_no)); 2150 } 2151 } 2152 } 2153 alst = (gdth_arcdl_str *)ha->pscratch; 2154 alst->entries_avail = MAX_LDRIVES; 2155 alst->first_entry = 0; 2156 alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]); 2157 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2158 ARRAY_DRV_LIST2 | LA_CTRL_PATTERN, 2159 INVALID_CHANNEL, sizeof(gdth_arcdl_str) + 2160 (alst->entries_avail-1) * sizeof(gdth_alist_str))) { 2161 for (j = 0; j < alst->entries_init; ++j) { 2162 ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd; 2163 ha->hdr[j].is_master = alst->list[j].is_master; 2164 ha->hdr[j].is_parity = alst->list[j].is_parity; 2165 ha->hdr[j].is_hotfix = alst->list[j].is_hotfix; 2166 ha->hdr[j].master_no = alst->list[j].cd_handle; 2167 } 2168 } else if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2169 ARRAY_DRV_LIST | LA_CTRL_PATTERN, 2170 0, 35 * sizeof(gdth_alist_str))) { 2171 for (j = 0; j < 35; ++j) { 2172 alst2 = &((gdth_alist_str *)ha->pscratch)[j]; 2173 ha->hdr[j].is_arraydrv = alst2->is_arrayd; 2174 ha->hdr[j].is_master = alst2->is_master; 2175 ha->hdr[j].is_parity = alst2->is_parity; 2176 ha->hdr[j].is_hotfix = alst2->is_hotfix; 2177 ha->hdr[j].master_no = alst2->cd_handle; 2178 } 2179 } 2180 } 2181 } 2182 2183 /* initialize raw service */ 2184 ha->raw_feat = 0; 2185 if (!force_dma32) { 2186 ok = gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_X_INIT_RAW,0,0,0); 2187 if (ok) 2188 ha->raw_feat = GDT_64BIT; 2189 } 2190 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) 2191 ok = gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INIT,0,0,0); 2192 if (!ok) { 2193 printk("GDT-HA %d: Initialization error raw service (code %d)\n", 2194 hanum, ha->status); 2195 return 0; 2196 } 2197 TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n")); 2198 2199 /* set/get features raw service (scatter/gather) */ 2200 if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_SET_FEAT,SCATTER_GATHER, 2201 0,0)) { 2202 TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n")); 2203 if 
(gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_GET_FEAT,0,0,0)) { 2204 TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", 2205 ha->info)); 2206 ha->raw_feat |= (ushort)ha->info; 2207 } 2208 } 2209 2210 /* set/get features cache service (equal to raw service) */ 2211 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_SET_FEAT,0, 2212 SCATTER_GATHER,0)) { 2213 TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n")); 2214 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_GET_FEAT,0,0,0)) { 2215 TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", 2216 ha->info)); 2217 ha->cache_feat |= (ushort)ha->info; 2218 } 2219 } 2220 2221 /* reserve drives for raw service */ 2222 if (reserve_mode != 0) { 2223 gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE_ALL, 2224 reserve_mode == 1 ? 1 : 3, 0, 0); 2225 TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n", 2226 ha->status)); 2227 } 2228 for (i = 0; i < MAX_RES_ARGS; i += 4) { 2229 if (reserve_list[i] == hanum && reserve_list[i+1] < ha->bus_cnt && 2230 reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) { 2231 TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n", 2232 reserve_list[i], reserve_list[i+1], 2233 reserve_list[i+2], reserve_list[i+3])); 2234 if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE,0, 2235 reserve_list[i+1], reserve_list[i+2] | 2236 (reserve_list[i+3] << 8))) { 2237 printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n", 2238 hanum, ha->status); 2239 } 2240 } 2241 } 2242 2243 /* Determine OEM string using IOCTL */ 2244 oemstr = (gdth_oem_str_ioctl *)ha->pscratch; 2245 oemstr->params.ctl_version = 0x01; 2246 oemstr->params.buffer_size = sizeof(oemstr->text); 2247 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL, 2248 CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL, 2249 sizeof(gdth_oem_str_ioctl))) { 2250 TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n")); 2251 printk("GDT-HA %d: Vendor: %s Name: %s\n", 2252 
hanum,oemstr->text.oem_company_name,ha->binfo.type_string); 2253 /* Save the Host Drive inquiry data */ 2254#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 2255 strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id, 2256 sizeof(ha->oem_name)); 2257#else 2258 strncpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,7); 2259 ha->oem_name[7] = '\0'; 2260#endif 2261 } else { 2262 /* Old method, based on PCI ID */ 2263 TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n")); 2264 printk("GDT-HA %d: Name: %s\n", 2265 hanum,ha->binfo.type_string); 2266#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 2267 if (ha->oem_id == OEM_ID_INTEL) 2268 strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name)); 2269 else 2270 strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name)); 2271#else 2272 if (ha->oem_id == OEM_ID_INTEL) 2273 strcpy(ha->oem_name,"Intel "); 2274 else 2275 strcpy(ha->oem_name,"ICP "); 2276#endif 2277 } 2278 2279 /* scanning for host drives */ 2280 for (i = 0; i < cdev_cnt; ++i) 2281 gdth_analyse_hdrive(hanum,i); 2282 2283 TRACE(("gdth_search_drives() OK\n")); 2284 return 1; 2285} 2286 2287static int gdth_analyse_hdrive(int hanum,ushort hdrive) 2288{ 2289 register gdth_ha_str *ha; 2290 ulong32 drv_cyls; 2291 int drv_hds, drv_secs; 2292 2293 TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n",hanum,hdrive)); 2294 if (hdrive >= MAX_HDRIVES) 2295 return 0; 2296 ha = HADATA(gdth_ctr_tab[hanum]); 2297 2298 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INFO,hdrive,0,0)) 2299 return 0; 2300 ha->hdr[hdrive].present = TRUE; 2301 ha->hdr[hdrive].size = ha->info; 2302 2303 /* evaluate mapping (sectors per head, heads per cylinder) */ 2304 ha->hdr[hdrive].size &= ~SECS32; 2305 if (ha->info2 == 0) { 2306 gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs); 2307 } else { 2308 drv_hds = ha->info2 & 0xff; 2309 drv_secs = (ha->info2 >> 8) & 0xff; 2310 drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs; 2311 } 2312 
ha->hdr[hdrive].heads = (unchar)drv_hds; 2313 ha->hdr[hdrive].secs = (unchar)drv_secs; 2314 /* round size */ 2315 ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs; 2316 2317 /* with a 64-bit cache service, GDT_X_INFO returns the upper 32 size bits in info2 (drives > 2 TB) */ if (ha->cache_feat & GDT_64BIT) { 2318 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_X_INFO,hdrive,0,0) 2319 && ha->info2 != 0) { 2320 ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info; 2321 } 2322 } 2323 TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n", 2324 hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs)); 2325 2326 /* get informations about device */ 2327 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_DEVTYPE,hdrive,0,0)) { 2328 TRACE2(("gdth_search_dr() cache drive %d devtype %d\n", 2329 hdrive,ha->info)); 2330 ha->hdr[hdrive].devtype = (ushort)ha->info; 2331 } 2332 2333 /* cluster info */ 2334 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_CLUST_INFO,hdrive,0,0)) { 2335 TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n", 2336 hdrive,ha->info)); 2337 if (!shared_access) 2338 ha->hdr[hdrive].cluster_type = (unchar)ha->info; 2339 } 2340 2341 /* R/W attributes */ 2342 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_RW_ATTRIBS,hdrive,0,0)) { 2343 TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n", 2344 hdrive,ha->info)); 2345 ha->hdr[hdrive].rw_attribs = (unchar)ha->info; 2346 } 2347 2348 return 1; 2349} 2350 2351 2352/* command queueing/sending functions */ 2353 2354/* gdth_putq(): insert scp into the adapter's request queue under ha->smp_lock; the caller's 'priority' is stashed in SCp.this_residual (0 = highest). For external commands to locked buses/drives the timeout is parked via gdth_update_timeout(). */ static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority) 2355{ 2356 register gdth_ha_str *ha; 2357 register Scsi_Cmnd *pscp; 2358 register Scsi_Cmnd *nscp; 2359 ulong flags; 2360 unchar b, t; 2361 2362 TRACE(("gdth_putq() priority %d\n",priority)); 2363 ha = HADATA(gdth_ctr_tab[hanum]); 2364 spin_lock_irqsave(&ha->smp_lock, flags); 2365 2366 if (scp->done != gdth_scsi_done) { 2367 scp->SCp.this_residual = (int)priority; 2368 b = virt_ctr ?
NUMDATA(scp->device->host)->busnum:scp->device->channel; 2369 t = scp->device->id; 2370 if (priority >= DEFAULT_PRI) { 2371 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) || 2372 (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) { 2373 TRACE2(("gdth_putq(): locked IO ->update_timeout()\n")); 2374 scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0); 2375 } 2376 } 2377 } 2378 2379 /* singly linked list threaded through SCp.ptr, kept sorted by priority */ if (ha->req_first==NULL) { 2380 ha->req_first = scp; /* queue was empty */ 2381 scp->SCp.ptr = NULL; 2382 } else { /* queue not empty */ 2383 pscp = ha->req_first; 2384 nscp = (Scsi_Cmnd *)pscp->SCp.ptr; 2385 /* priority: 0-highest,..,0xff-lowest */ 2386 while (nscp && (unchar)nscp->SCp.this_residual <= priority) { 2387 pscp = nscp; 2388 nscp = (Scsi_Cmnd *)pscp->SCp.ptr; 2389 } 2390 pscp->SCp.ptr = (char *)scp; 2391 scp->SCp.ptr = (char *)nscp; 2392 } 2393 spin_unlock_irqrestore(&ha->smp_lock, flags); 2394 2395#ifdef GDTH_STATISTICS 2396 /* NOTE: 'flags' is reused as a plain counter here, after the lock has been dropped */ flags = 0; 2397 for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr) 2398 ++flags; 2399 if (max_rq < flags) { 2400 max_rq = flags; 2401 TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq)); 2402 } 2403#endif 2404} 2405 2406/* gdth_next(): walk the request queue and hand commands to the firmware (gdth_fill_cache_cmd()/gdth_fill_raw_cmd()/gdth_special_cmd()); takes ha->smp_lock unless gdth_polling. TEST_UNIT_READY drives the bus-scan state machine in ha->scan_mode. */ static void gdth_next(int hanum) 2407{ 2408 register gdth_ha_str *ha; 2409 register Scsi_Cmnd *pscp; 2410 register Scsi_Cmnd *nscp; 2411 unchar b, t, l, firsttime; 2412 unchar this_cmd, next_cmd; 2413 ulong flags = 0; 2414 int cmd_index; 2415 2416 TRACE(("gdth_next() hanum %d\n",hanum)); 2417 ha = HADATA(gdth_ctr_tab[hanum]); 2418 if (!gdth_polling) 2419 spin_lock_irqsave(&ha->smp_lock, flags); 2420 2421 ha->cmd_cnt = ha->cmd_offs_dpmem = 0; 2422 this_cmd = firsttime = TRUE; 2423 next_cmd = gdth_polling ? FALSE:TRUE; 2424 cmd_index = 0; 2425 2426 for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) { 2427 if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr) 2428 pscp = (Scsi_Cmnd *)pscp->SCp.ptr; 2429 if (nscp->done != gdth_scsi_done) { 2430 b = virt_ctr ?
2431 NUMDATA(nscp->device->host)->busnum : nscp->device->channel; 2432 t = nscp->device->id; 2433 l = nscp->device->lun; 2434 if (nscp->SCp.this_residual >= DEFAULT_PRI) { 2435 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) || 2436 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) 2437 continue; 2438 } 2439 } else 2440 b = t = l = 0; 2441 2442 if (firsttime) { 2443 if (gdth_test_busy(hanum)) { /* controller busy ? */ 2444 TRACE(("gdth_next() controller %d busy !\n",hanum)); 2445 if (!gdth_polling) { 2446 spin_unlock_irqrestore(&ha->smp_lock, flags); 2447 return; 2448 } 2449 while (gdth_test_busy(hanum)) 2450 gdth_delay(1); 2451 } 2452 firsttime = FALSE; 2453 } 2454 2455 if (nscp->done != gdth_scsi_done) { 2456 if (nscp->SCp.phase == -1) { 2457 nscp->SCp.phase = CACHESERVICE; /* default: cache svc. */ 2458 if (nscp->cmnd[0] == TEST_UNIT_READY) { 2459 TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n", 2460 b, t, l)); 2461 /* TEST_UNIT_READY -> set scan mode */ 2462 if ((ha->scan_mode & 0x0f) == 0) { 2463 if (b == 0 && t == 0 && l == 0) { 2464 ha->scan_mode |= 1; 2465 TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); 2466 } 2467 } else if ((ha->scan_mode & 0x0f) == 1) { 2468 if (b == 0 && ((t == 0 && l == 1) || 2469 (t == 1 && l == 0))) { 2470 nscp->SCp.sent_command = GDT_SCAN_START; 2471 nscp->SCp.phase = ((ha->scan_mode & 0x10 ? 
1:0) << 8) 2472 | SCSIRAWSERVICE; 2473 ha->scan_mode = 0x12; 2474 TRACE2(("Scan mode: 0x%x (SCAN_START)\n", 2475 ha->scan_mode)); 2476 } else { 2477 ha->scan_mode &= 0x10; 2478 TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); 2479 } 2480 } else if (ha->scan_mode == 0x12) { 2481 if (b == ha->bus_cnt && t == ha->tid_cnt-1) { 2482 nscp->SCp.phase = SCSIRAWSERVICE; 2483 nscp->SCp.sent_command = GDT_SCAN_END; 2484 ha->scan_mode &= 0x10; 2485 TRACE2(("Scan mode: 0x%x (SCAN_END)\n", 2486 ha->scan_mode)); 2487 } 2488 } 2489 } 2490 if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY && 2491 nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE && 2492 (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) { 2493 /* always GDT_CLUST_INFO! */ 2494 nscp->SCp.sent_command = GDT_CLUST_INFO; 2495 } 2496 } 2497 } 2498 2499 if (nscp->SCp.sent_command != -1) { 2500 if ((nscp->SCp.phase & 0xff) == CACHESERVICE) { 2501 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t))) 2502 this_cmd = FALSE; 2503 next_cmd = FALSE; 2504 } else if ((nscp->SCp.phase & 0xff) == SCSIRAWSERVICE) { 2505 if (!(cmd_index=gdth_fill_raw_cmd(hanum,nscp,BUS_L2P(ha,b)))) 2506 this_cmd = FALSE; 2507 next_cmd = FALSE; 2508 } else { 2509 memset((char*)nscp->sense_buffer,0,16); 2510 nscp->sense_buffer[0] = 0x70; 2511 nscp->sense_buffer[2] = NOT_READY; 2512 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 2513 if (!nscp->SCp.have_data_in) 2514 nscp->SCp.have_data_in++; 2515 else 2516 nscp->scsi_done(nscp); 2517 } 2518 } else if (nscp->done == gdth_scsi_done) { 2519 if (!(cmd_index=gdth_special_cmd(hanum,nscp))) 2520 this_cmd = FALSE; 2521 next_cmd = FALSE; 2522 } else if (b != ha->virt_bus) { 2523 if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW || 2524 !(cmd_index=gdth_fill_raw_cmd(hanum,nscp,BUS_L2P(ha,b)))) 2525 this_cmd = FALSE; 2526 else 2527 ha->raw[BUS_L2P(ha,b)].io_cnt[t]++; 2528 } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) { 2529 TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n", 
2530 nscp->cmnd[0], b, t, l)); 2531 nscp->result = DID_BAD_TARGET << 16; 2532 if (!nscp->SCp.have_data_in) 2533 nscp->SCp.have_data_in++; 2534 else 2535 nscp->scsi_done(nscp); 2536 } else { 2537 switch (nscp->cmnd[0]) { 2538 case TEST_UNIT_READY: 2539 case INQUIRY: 2540 case REQUEST_SENSE: 2541 case READ_CAPACITY: 2542 case VERIFY: 2543 case START_STOP: 2544 case MODE_SENSE: 2545 case SERVICE_ACTION_IN: 2546 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], 2547 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], 2548 nscp->cmnd[4],nscp->cmnd[5])); 2549 if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) { 2550 /* return UNIT_ATTENTION */ 2551 TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n", 2552 nscp->cmnd[0], t)); 2553 ha->hdr[t].media_changed = FALSE; 2554 memset((char*)nscp->sense_buffer,0,16); 2555 nscp->sense_buffer[0] = 0x70; 2556 nscp->sense_buffer[2] = UNIT_ATTENTION; 2557 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 2558 if (!nscp->SCp.have_data_in) 2559 nscp->SCp.have_data_in++; 2560 else 2561 nscp->scsi_done(nscp); 2562 } else if (gdth_internal_cache_cmd(hanum,nscp)) 2563 nscp->scsi_done(nscp); 2564 break; 2565 2566 case ALLOW_MEDIUM_REMOVAL: 2567 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], 2568 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], 2569 nscp->cmnd[4],nscp->cmnd[5])); 2570 if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) { 2571 TRACE(("Prevent r. nonremov. drive->do nothing\n")); 2572 nscp->result = DID_OK << 16; 2573 nscp->sense_buffer[0] = 0; 2574 if (!nscp->SCp.have_data_in) 2575 nscp->SCp.have_data_in++; 2576 else 2577 nscp->scsi_done(nscp); 2578 } else { 2579 nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0; 2580 TRACE(("Prevent/allow r. %d rem. drive %d\n", 2581 nscp->cmnd[4],nscp->cmnd[3])); 2582 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t))) 2583 this_cmd = FALSE; 2584 } 2585 break; 2586 2587 case RESERVE: 2588 case RELEASE: 2589 TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ? 
2590 "RESERVE" : "RELEASE")); 2591 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t))) 2592 this_cmd = FALSE; 2593 break; 2594 2595 case READ_6: 2596 case WRITE_6: 2597 case READ_10: 2598 case WRITE_10: 2599 case READ_16: 2600 case WRITE_16: 2601 if (ha->hdr[t].media_changed) { 2602 /* return UNIT_ATTENTION */ 2603 TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n", 2604 nscp->cmnd[0], t)); 2605 ha->hdr[t].media_changed = FALSE; 2606 memset((char*)nscp->sense_buffer,0,16); 2607 nscp->sense_buffer[0] = 0x70; 2608 nscp->sense_buffer[2] = UNIT_ATTENTION; 2609 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 2610 if (!nscp->SCp.have_data_in) 2611 nscp->SCp.have_data_in++; 2612 else 2613 nscp->scsi_done(nscp); 2614 } else if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t))) 2615 this_cmd = FALSE; 2616 break; 2617 2618 default: 2619 TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0], 2620 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], 2621 nscp->cmnd[4],nscp->cmnd[5])); 2622 printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n", 2623 hanum, nscp->cmnd[0]); 2624 nscp->result = DID_ABORT << 16; 2625 if (!nscp->SCp.have_data_in) 2626 nscp->SCp.have_data_in++; 2627 else 2628 nscp->scsi_done(nscp); 2629 break; 2630 } 2631 } 2632 2633 if (!this_cmd) 2634 break; 2635 if (nscp == ha->req_first) 2636 ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr; 2637 else 2638 pscp->SCp.ptr = nscp->SCp.ptr; 2639 if (!next_cmd) 2640 break; 2641 } 2642 2643 if (ha->cmd_cnt > 0) { 2644 gdth_release_event(hanum); 2645 } 2646 2647 if (!gdth_polling) 2648 spin_unlock_irqrestore(&ha->smp_lock, flags); 2649 2650 if (gdth_polling && ha->cmd_cnt > 0) { 2651 if (!gdth_wait(hanum,cmd_index,POLL_TIMEOUT)) 2652 printk("GDT-HA %d: Command %d timed out !\n", 2653 hanum,cmd_index); 2654 } 2655} 2656 2657static void gdth_copy_internal_data(int hanum,Scsi_Cmnd *scp, 2658 char *buffer,ushort count) 2659{ 2660 ushort cpcount,i; 2661 ushort cpsum,cpnow; 2662 struct scatterlist *sl; 2663 
gdth_ha_str *ha; 2664 char *address; 2665 2666 /* copy at most request_bufflen bytes of the locally built response into scp's buffer (flat or scatter-gather) */ cpcount = count<=(ushort)scp->request_bufflen ? count:(ushort)scp->request_bufflen; 2667 ha = HADATA(gdth_ctr_tab[hanum]); 2668 2669 if (scp->use_sg) { 2670 sl = (struct scatterlist *)scp->request_buffer; 2671 for (i=0,cpsum=0; i<scp->use_sg; ++i,++sl) { 2672 unsigned long flags; 2673 cpnow = (ushort)sl->length; 2674 TRACE(("copy_internal() now %d sum %d count %d %d\n", 2675 cpnow,cpsum,cpcount,(ushort)scp->bufflen)); 2676 /* clamp the final chunk so no more than cpcount bytes total are written */ if (cpsum+cpnow > cpcount) 2677 cpnow = cpcount - cpsum; 2678 cpsum += cpnow; 2679 if (!sl->page) { 2680 printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n", 2681 hanum); 2682 return; 2683 } 2684 local_irq_save(flags); 2685#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 2686 address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset; 2687 memcpy(address,buffer,cpnow); 2688 flush_dcache_page(sl->page); 2689 kunmap_atomic(address, KM_BIO_SRC_IRQ); 2690#else 2691 address = kmap_atomic(sl->page, KM_BH_IRQ) + sl->offset; 2692 memcpy(address,buffer,cpnow); 2693 flush_dcache_page(sl->page); 2694 kunmap_atomic(address, KM_BH_IRQ); 2695#endif 2696 local_irq_restore(flags); 2697 if (cpsum == cpcount) 2698 break; 2699 buffer += cpnow; 2700 } 2701 } else { 2702 TRACE(("copy_internal() count %d\n",cpcount)); 2703 memcpy((char*)scp->request_buffer,buffer,cpcount); 2704 } 2705} 2706 2707/* gdth_internal_cache_cmd(): answer simple SCSI commands to host drives locally (TEST UNIT READY, INQUIRY, REQUEST SENSE, MODE SENSE, READ CAPACITY, ...) without sending them to the firmware; responses are delivered via gdth_copy_internal_data(). */ static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp) 2708{ 2709 register gdth_ha_str *ha; 2710 unchar t; 2711 gdth_inq_data inq; 2712 gdth_rdcap_data rdc; 2713 gdth_sense_data sd; 2714 gdth_modep_data mpd; 2715 2716 ha = HADATA(gdth_ctr_tab[hanum]); 2717 t = scp->device->id; 2718 TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n", 2719 scp->cmnd[0],t)); 2720 2721 scp->result = DID_OK << 16; 2722 scp->sense_buffer[0] = 0; 2723 2724 switch (scp->cmnd[0]) { 2725 case TEST_UNIT_READY: 2726 case VERIFY: 2727 case START_STOP: 2728 TRACE2(("Test/Verify/Start hdrive %d\n",t)); 2729 break; 2730 2731 case
INQUIRY: 2732 TRACE2(("Inquiry hdrive %d devtype %d\n", 2733 t,ha->hdr[t].devtype)); 2734 inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK; 2735 /* you can here set all disks to removable, if you want to do 2736 a flush using the ALLOW_MEDIUM_REMOVAL command */ 2737 inq.modif_rmb = 0x00; 2738 if ((ha->hdr[t].devtype & 1) || 2739 (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) 2740 inq.modif_rmb = 0x80; 2741 inq.version = 2; 2742 inq.resp_aenc = 2; 2743 inq.add_length= 32; 2744 strcpy(inq.vendor,ha->oem_name); 2745 sprintf(inq.product,"Host Drive #%02d",t); 2746 strcpy(inq.revision," "); 2747 gdth_copy_internal_data(hanum,scp,(char*)&inq,sizeof(gdth_inq_data)); 2748 break; 2749 2750 case REQUEST_SENSE: 2751 TRACE2(("Request sense hdrive %d\n",t)); 2752 sd.errorcode = 0x70; 2753 sd.segno = 0x00; 2754 sd.key = NO_SENSE; 2755 sd.info = 0; 2756 sd.add_length= 0; 2757 gdth_copy_internal_data(hanum,scp,(char*)&sd,sizeof(gdth_sense_data)); 2758 break; 2759 2760 case MODE_SENSE: 2761 TRACE2(("Mode sense hdrive %d\n",t)); 2762 memset((char*)&mpd,0,sizeof(gdth_modep_data)); 2763 mpd.hd.data_length = sizeof(gdth_modep_data); 2764 mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 
0x80:0; 2765 mpd.hd.bd_length = sizeof(mpd.bd); 2766 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; 2767 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; 2768 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); 2769 gdth_copy_internal_data(hanum,scp,(char*)&mpd,sizeof(gdth_modep_data)); 2770 break; 2771 2772 case READ_CAPACITY: 2773 TRACE2(("Read capacity hdrive %d\n",t)); 2774 if (ha->hdr[t].size > (ulong64)0xffffffff) 2775 rdc.last_block_no = 0xffffffff; 2776 else 2777 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); 2778 rdc.block_length = cpu_to_be32(SECTOR_SIZE); 2779 gdth_copy_internal_data(hanum,scp,(char*)&rdc,sizeof(gdth_rdcap_data)); 2780 break; 2781 2782 case SERVICE_ACTION_IN: 2783 if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 && 2784 (ha->cache_feat & GDT_64BIT)) { 2785 gdth_rdcap16_data rdc16; 2786 2787 TRACE2(("Read capacity (16) hdrive %d\n",t)); 2788 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); 2789 rdc16.block_length = cpu_to_be32(SECTOR_SIZE); 2790 gdth_copy_internal_data(hanum,scp,(char*)&rdc16,sizeof(gdth_rdcap16_data)); 2791 } else { 2792 scp->result = DID_ABORT << 16; 2793 } 2794 break; 2795 2796 default: 2797 TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0])); 2798 break; 2799 } 2800 2801 if (!scp->SCp.have_data_in) 2802 scp->SCp.have_data_in++; 2803 else 2804 return 1; 2805 2806 return 0; 2807} 2808 2809static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive) 2810{ 2811 register gdth_ha_str *ha; 2812 register gdth_cmd_str *cmdp; 2813 struct scatterlist *sl; 2814 ulong32 cnt, blockcnt; 2815 ulong64 no, blockno; 2816 dma_addr_t phys_addr; 2817 int i, cmd_index, read_write, sgcnt, mode64; 2818 struct page *page; 2819 ulong offset; 2820 2821 ha = HADATA(gdth_ctr_tab[hanum]); 2822 cmdp = ha->pccb; 2823 TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n", 2824 scp->cmnd[0],scp->cmd_len,hdrive)); 2825 2826 if (ha->type==GDT_EISA && ha->cmd_cnt>0) 2827 return 0; 2828 2829 mode64 
= (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE; 2830 /* test for READ_16, WRITE_16 if !mode64 ? --- 2831 not required, should not occur due to error return on 2832 READ_CAPACITY_16 */ 2833 2834 cmdp->Service = CACHESERVICE; 2835 cmdp->RequestBuffer = scp; 2836 /* search free command index */ 2837 if (!(cmd_index=gdth_get_cmd_index(hanum))) { 2838 TRACE(("GDT: No free command index found\n")); 2839 return 0; 2840 } 2841 /* if it's the first command, set command semaphore */ 2842 if (ha->cmd_cnt == 0) 2843 gdth_set_sema0(hanum); 2844 2845 /* fill command */ 2846 read_write = 0; 2847 if (scp->SCp.sent_command != -1) 2848 cmdp->OpCode = scp->SCp.sent_command; /* special cache cmd. */ 2849 else if (scp->cmnd[0] == RESERVE) 2850 cmdp->OpCode = GDT_RESERVE_DRV; 2851 else if (scp->cmnd[0] == RELEASE) 2852 cmdp->OpCode = GDT_RELEASE_DRV; 2853 else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { 2854 if (scp->cmnd[4] & 1) /* prevent ? */ 2855 cmdp->OpCode = GDT_MOUNT; 2856 else if (scp->cmnd[3] & 1) /* removable drive ? 
*/
            cmdp->OpCode = GDT_UNMOUNT;
        else
            cmdp->OpCode = GDT_FLUSH;
    /* NOTE(review): this span continues gdth_fill_cache_cmd(); its head
       (declarations of ha, cmdp, read_write, no, cnt, blockno, blockcnt,
       hdrive, mode64, cmd_index, sl, sgcnt, i, page, offset, phys_addr)
       lies above this chunk — confirm against the full file. */
    } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
               scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
    ) {
        /* Write: honor global write-through switch or per-drive attribute
           (only if the firmware advertises GDT_WR_THROUGH). */
        read_write = 1;
        if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
                                   (ha->cache_feat & GDT_WR_THROUGH)))
            cmdp->OpCode = GDT_WRITE_THR;
        else
            cmdp->OpCode = GDT_WRITE;
    } else {
        read_write = 2;
        cmdp->OpCode = GDT_READ;
    }

    cmdp->BoardNode = LOCALBOARD;
    if (mode64) {
        cmdp->u.cache64.DeviceNo = hdrive;
        cmdp->u.cache64.BlockNo  = 1;
        cmdp->u.cache64.sg_canz  = 0;
    } else {
        cmdp->u.cache.DeviceNo = hdrive;
        cmdp->u.cache.BlockNo  = 1;
        cmdp->u.cache.sg_canz  = 0;
    }

    if (read_write) {
        /* Decode LBA/count from the CDB; layout depends on CDB length. */
        if (scp->cmd_len == 16) {
            /* 16-byte CDB: 64-bit LBA at bytes 2..9, 32-bit count at 10..13 */
            memcpy(&no, &scp->cmnd[2], sizeof(ulong64));
            blockno = be64_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32));
            blockcnt = be32_to_cpu(cnt);
        } else if (scp->cmd_len == 10) {
            /* 10-byte CDB: 32-bit LBA at bytes 2..5, 16-bit count at 7..8 */
            memcpy(&no, &scp->cmnd[2], sizeof(ulong32));
            blockno = be32_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[7], sizeof(ushort));
            blockcnt = be16_to_cpu(cnt);
        } else {
            /* 6-byte CDB: LBA is the low 21 bits; count byte 0 means 256 */
            memcpy(&no, &scp->cmnd[0], sizeof(ulong32));
            blockno = be32_to_cpu(no) & 0x001fffffUL;
            blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
        }
        if (mode64) {
            cmdp->u.cache64.BlockNo  = blockno;
            cmdp->u.cache64.BlockCnt = blockcnt;
        } else {
            cmdp->u.cache.BlockNo  = (ulong32)blockno;
            cmdp->u.cache.BlockCnt = blockcnt;
        }

        if (scp->use_sg) {
            /* Scatter/gather request: map the whole sg list for DMA and
               copy bus addresses/lengths into the firmware sg list. */
            sl = (struct scatterlist *)scp->request_buffer;
            sgcnt = scp->use_sg;
            scp->SCp.Status  = GDTH_MAP_SG;     /* remembered for unmap in gdth_sync_event() */
            scp->SCp.Message = (read_write == 1 ?
                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
            sgcnt = pci_map_sg(ha->pdev,sl,scp->use_sg,scp->SCp.Message);
            if (mode64) {
                /* DestAddr == -1 tells the firmware to use the sg list */
                cmdp->u.cache64.DestAddr= (ulong64)-1;
                cmdp->u.cache64.sg_canz = sgcnt;
                for (i=0; i<sgcnt; ++i,++sl) {
                    cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
                        ha->dma64_cnt++;
                    else
                        ha->dma32_cnt++;
#endif
                    cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                cmdp->u.cache.DestAddr= 0xffffffff;  /* sentinel: use sg list */
                cmdp->u.cache.sg_canz = sgcnt;
                for (i=0; i<sgcnt; ++i,++sl) {
                    cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    ha->dma32_cnt++;
#endif
                    cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            if (max_sg < (ulong32)sgcnt) {
                max_sg = (ulong32)sgcnt;
                TRACE3(("GDT: max_sg = %d\n",max_sg));
            }
#endif

        } else if (scp->request_bufflen) {
            /* Single linear buffer: map one page-based DMA region. */
            scp->SCp.Status  = GDTH_MAP_SINGLE;
            scp->SCp.Message = (read_write == 1 ?
                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
            page = virt_to_page(scp->request_buffer);
            offset = (ulong)scp->request_buffer & ~PAGE_MASK;
            phys_addr = pci_map_page(ha->pdev,page,offset,
                                     scp->request_bufflen,scp->SCp.Message);
            scp->SCp.dma_handle = phys_addr;    /* kept for pci_unmap_page() */
            if (mode64) {
                if (ha->cache_feat & SCATTER_GATHER) {
                    /* firmware supports sg: express the buffer as a 1-entry list */
                    cmdp->u.cache64.DestAddr = (ulong64)-1;
                    cmdp->u.cache64.sg_canz  = 1;
                    cmdp->u.cache64.sg_lst[0].sg_ptr = phys_addr;
                    cmdp->u.cache64.sg_lst[0].sg_len = scp->request_bufflen;
                    cmdp->u.cache64.sg_lst[1].sg_len = 0;
                } else {
                    cmdp->u.cache64.DestAddr = phys_addr;
                    cmdp->u.cache64.sg_canz= 0;
                }
            } else {
                if (ha->cache_feat & SCATTER_GATHER) {
                    cmdp->u.cache.DestAddr = 0xffffffff;
                    cmdp->u.cache.sg_canz  = 1;
                    cmdp->u.cache.sg_lst[0].sg_ptr = phys_addr;
                    cmdp->u.cache.sg_lst[0].sg_len = scp->request_bufflen;
                    cmdp->u.cache.sg_lst[1].sg_len = 0;
                } else {
                    cmdp->u.cache.DestAddr = phys_addr;
                    cmdp->u.cache.sg_canz= 0;
                }
            }
        }
    }
    /* evaluate command size, check space */
    if (mode64) {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
               cmdp->u.cache64.sg_lst[0].sg_ptr,
               cmdp->u.cache64.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
        /* command length = fixed header up to sg list + used sg entries */
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
            (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
    } else {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
               cmdp->u.cache.sg_lst[0].sg_ptr,
               cmdp->u.cache.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
            (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
    }
    /* round command length up to the next dword boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* queued behind other commands: make sure it still fits in DPMEM,
           otherwise release the reserved command slot and report failure */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(hanum);
    return cmd_index;
}

/*
 * gdth_fill_raw_cmd() - build and post a SCSI raw-service command for the
 * controller `hanum` on (virtual) bus `b` from the mid-layer command `scp`.
 *
 * Returns the allocated command index (> 0) on success, 0 if no index is
 * free, the EISA single-command restriction applies, or DPMEM overflows
 * (checked further below in this function's body).
 */
static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
{
    register gdth_ha_str *ha;
    register gdth_cmd_str *cmdp;
    struct scatterlist *sl;
    ushort i;
    dma_addr_t phys_addr, sense_paddr;
    int cmd_index, sgcnt, mode64;
    unchar t,l;
    struct page *page;
    ulong offset;

    ha = HADATA(gdth_ctr_tab[hanum]);
    t = scp->device->id;
    l = scp->device->lun;
    cmdp = ha->pccb;
    TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
           scp->cmnd[0],b,t,l));

    /* EISA controllers accept only one command at a time */
    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
        return 0;

    /* use the 64-bit raw command layout only if the firmware supports it */
    mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;

    cmdp->Service = SCSIRAWSERVICE;
    cmdp->RequestBuffer = scp;
    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(hanum))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }
    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(hanum);

    /* fill command */
    if (scp->SCp.sent_command != -1) {
        /* driver-internal special command: opcode was preset by the caller */
        cmdp->OpCode = scp->SCp.sent_command; /* special raw cmd.
*/
        cmdp->BoardNode = LOCALBOARD;
        if (mode64) {
            /* direction was packed into the upper byte of SCp.phase by the caller */
            cmdp->u.raw64.direction = (scp->SCp.phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw64.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
        } else {
            cmdp->u.raw.direction = (scp->SCp.phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
        }

    } else {
        /* Regular pass-through command: map the sense buffer (16 bytes) for
           DMA so the firmware can deposit sense data directly. */
        page = virt_to_page(scp->sense_buffer);
        offset = (ulong)scp->sense_buffer & ~PAGE_MASK;
        sense_paddr = pci_map_page(ha->pdev,page,offset,
                                   16,PCI_DMA_FROMDEVICE);
        /* stash the low 32 bits of the sense DMA address in SCp.buffer ... */
        *(ulong32 *)&scp->SCp.buffer = (ulong32)sense_paddr;
        /* high part, if 64bit */
        *(ulong32 *)&scp->host_scribble = (ulong32)((ulong64)sense_paddr >> 32);
        cmdp->OpCode    = GDT_WRITE;    /* always */
        cmdp->BoardNode = LOCALBOARD;
        if (mode64) {
            cmdp->u.raw64.reserved   = 0;
            cmdp->u.raw64.mdisc_time = 0;
            cmdp->u.raw64.mcon_time  = 0;
            cmdp->u.raw64.clen       = scp->cmd_len;
            cmdp->u.raw64.target     = t;
            cmdp->u.raw64.lun        = l;
            cmdp->u.raw64.bus        = b;
            cmdp->u.raw64.priority   = 0;
            cmdp->u.raw64.sdlen      = scp->request_bufflen;
            cmdp->u.raw64.sense_len  = 16;
            cmdp->u.raw64.sense_data = sense_paddr;
            /* data direction derived from the opcode via gdth_direction_tab */
            cmdp->u.raw64.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);   /* 64-bit layout: 16-byte CDB */
            cmdp->u.raw64.sg_ranz    = 0;
        } else {
            cmdp->u.raw.reserved   = 0;
            cmdp->u.raw.mdisc_time = 0;
            cmdp->u.raw.mcon_time  = 0;
            cmdp->u.raw.clen       = scp->cmd_len;
            cmdp->u.raw.target     = t;
            cmdp->u.raw.lun        = l;
            cmdp->u.raw.bus        = b;
            cmdp->u.raw.priority   = 0;
            cmdp->u.raw.link_p     = 0;
            cmdp->u.raw.sdlen      = scp->request_bufflen;
            cmdp->u.raw.sense_len  = 16;
            cmdp->u.raw.sense_data = sense_paddr;
            cmdp->u.raw.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw.cmd,scp->cmnd,12);     /* 32-bit layout: 12-byte CDB */
            cmdp->u.raw.sg_ranz    = 0;
        }

        if (scp->use_sg) {
            /* Scatter/gather data buffer: map bidirectionally (direction is
               decided by the firmware per the `direction` field above). */
            sl = (struct scatterlist *)scp->request_buffer;
            sgcnt = scp->use_sg;
            scp->SCp.Status  = GDTH_MAP_SG;     /* remembered for unmap on completion */
            scp->SCp.Message = PCI_DMA_BIDIRECTIONAL;
            sgcnt = pci_map_sg(ha->pdev,sl,scp->use_sg,scp->SCp.Message);
            if (mode64) {
                cmdp->u.raw64.sdata   = (ulong64)-1;   /* sentinel: use sg list */
                cmdp->u.raw64.sg_ranz = sgcnt;
                for (i=0; i<sgcnt; ++i,++sl) {
                    cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
                        ha->dma64_cnt++;
                    else
                        ha->dma32_cnt++;
#endif
                    cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                cmdp->u.raw.sdata   = 0xffffffff;      /* sentinel: use sg list */
                cmdp->u.raw.sg_ranz = sgcnt;
                for (i=0; i<sgcnt; ++i,++sl) {
                    cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    ha->dma32_cnt++;
#endif
                    cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            if (max_sg < sgcnt) {
                max_sg = sgcnt;
                TRACE3(("GDT: max_sg = %d\n",sgcnt));
            }
#endif

        } else if (scp->request_bufflen) {
            /* Single linear data buffer: one page-based DMA mapping. */
            scp->SCp.Status  = GDTH_MAP_SINGLE;
            scp->SCp.Message = PCI_DMA_BIDIRECTIONAL;
            page = virt_to_page(scp->request_buffer);
            offset = (ulong)scp->request_buffer & ~PAGE_MASK;
            phys_addr = pci_map_page(ha->pdev,page,offset,
                                     scp->request_bufflen,scp->SCp.Message);
            scp->SCp.dma_handle = phys_addr;    /* kept for pci_unmap_page() */

            if (mode64) {
                if (ha->raw_feat & SCATTER_GATHER) {
                    /* express the linear buffer as a one-entry sg list */
                    cmdp->u.raw64.sdata  = (ulong64)-1;
                    cmdp->u.raw64.sg_ranz= 1;
                    cmdp->u.raw64.sg_lst[0].sg_ptr = phys_addr;
                    cmdp->u.raw64.sg_lst[0].sg_len = scp->request_bufflen;
                    cmdp->u.raw64.sg_lst[1].sg_len = 0;
                } else {
                    cmdp->u.raw64.sdata  = phys_addr;
                    cmdp->u.raw64.sg_ranz= 0;
                }
            } else {
                if (ha->raw_feat & SCATTER_GATHER) {
                    cmdp->u.raw.sdata  = 0xffffffff;
                    cmdp->u.raw.sg_ranz= 1;
                    cmdp->u.raw.sg_lst[0].sg_ptr = phys_addr;
                    cmdp->u.raw.sg_lst[0].sg_len = scp->request_bufflen;
                    cmdp->u.raw.sg_lst[1].sg_len = 0;
                } else {
                    cmdp->u.raw.sdata  = phys_addr;
                    cmdp->u.raw.sg_ranz= 0;
                }
            }
        }
        if (mode64) {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
                   cmdp->u.raw64.sg_lst[0].sg_ptr,
                   cmdp->u.raw64.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
                (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
        } else {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
                   cmdp->u.raw.sg_lst[0].sg_ptr,
                   cmdp->u.raw.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
                (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
        }
    }
    /* check space */
    /* round command length up to the next dword boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* would this command overflow the DPMEM window? then back out */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(hanum);
    return cmd_index;
}

/*
 * gdth_special_cmd() - post a pre-built controller command.  The complete
 * gdth_cmd_str is taken verbatim from scp->request_buffer (the caller built
 * it); only the RequestBuffer back-pointer and the command length are set
 * here.  Returns the command index (> 0) or 0 on failure.
 */
static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp)
{
    register gdth_ha_str *ha;
    register gdth_cmd_str *cmdp;
    int cmd_index;

    ha = HADATA(gdth_ctr_tab[hanum]);
    cmdp= ha->pccb;
    TRACE2(("gdth_special_cmd(): "));

    /* EISA controllers accept only one command at a time */
    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
        return 0;

    memcpy( cmdp, scp->request_buffer, sizeof(gdth_cmd_str));
    cmdp->RequestBuffer = scp;

    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(hanum))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }

    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(hanum);

    /* evaluate command size, check space */
    if (cmdp->OpCode == GDT_IOCTL) {
        TRACE2(("IOCTL\n"));
        ha->cmd_len =
            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
    } else if (cmdp->Service == CACHESERVICE) {
        TRACE2(("cache command %d\n",cmdp->OpCode));
        /* one sg descriptor is always reserved after the fixed header */
        if (ha->cache_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
    } else if
        (cmdp->Service == SCSIRAWSERVICE) {
        TRACE2(("raw command %d\n",cmdp->OpCode));
        if (ha->raw_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
    }

    /* round command length up to the next dword boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* would this command overflow the DPMEM window? then back out */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(hanum);
    return cmd_index;
}


/* Controller event handling functions */

/*
 * gdth_store_event() - record a controller/driver event in the global ring
 * buffer `ebuffer`.  A repeat of the newest entry (same source, index and
 * payload) only bumps its timestamp and repeat counter; otherwise a new
 * slot is taken, overwriting the oldest entry when the ring is full.
 * Returns the entry written, or NULL when source == 0.
 * NOTE: runs without GDTH_LOCK_HA() — callers are expected to serialize.
 */
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
                                      ushort idx, gdth_evt_data *evt)
{
    gdth_evt_str *e;
    struct timeval tv;

    /* no GDTH_LOCK_HA() ! */
    TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
    if (source == 0)                            /* no source -> no event */
        return NULL;

    /* duplicate of the newest entry?  binary payloads compare with memcmp,
       string payloads (size == 0) with strcmp */
    if (ebuffer[elastidx].event_source == source &&
        ebuffer[elastidx].event_idx == idx &&
        ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
            !memcmp((char *)&ebuffer[elastidx].event_data.eu,
            (char *)&evt->eu, evt->size)) ||
        (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
            !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
            (char *)&evt->event_string)))) {
        e = &ebuffer[elastidx];
        do_gettimeofday(&tv);
        e->last_stamp = tv.tv_sec;
        ++e->same_count;
    } else {
        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
            ++elastidx;
            if (elastidx == MAX_EVENTS)
                elastidx = 0;
            if (elastidx == eoldidx) {              /* reached mark ? */
                /* ring full: drop the oldest entry */
                ++eoldidx;
                if (eoldidx == MAX_EVENTS)
                    eoldidx = 0;
            }
        }
        e = &ebuffer[elastidx];
        e->event_source = source;
        e->event_idx = idx;
        do_gettimeofday(&tv);
        e->first_stamp = e->last_stamp = tv.tv_sec;
        e->same_count = 1;
        e->event_data = *evt;
        e->application = 0;
    }
    return e;
}

/*
 * gdth_read_event() - copy one event out of the ring into *estr.
 * handle == -1 starts at the oldest entry; otherwise `handle` is the slot
 * to read.  Returns the handle of the next entry, -1 after the newest one,
 * or the (out-of-range) input handle unchanged.  estr->event_source is 0
 * when no event was copied.
 */
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    ulong flags;

    TRACE2(("gdth_read_event() handle %d\n", handle));
    spin_lock_irqsave(&ha->smp_lock, flags);
    if (handle == -1)
        eindex = eoldidx;
    else
        eindex = handle;
    estr->event_source = 0;

    if (eindex >= MAX_EVENTS) {
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        return eindex;
    }
    e = &ebuffer[eindex];
    if (e->event_source != 0) {
        if (eindex != elastidx) {
            /* advance to the next slot (with wrap-around) */
            if (++eindex == MAX_EVENTS)
                eindex = 0;
        } else {
            eindex = -1;        /* newest entry read: iteration finished */
        }
        memcpy(estr, e, sizeof(gdth_evt_str));
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
    return eindex;
}

/*
 * gdth_readapp_event() - hand the oldest event not yet seen by this
 * application (bitmask `application`) to *estr and mark it as delivered.
 * estr->event_source is set to 0 when there is no unseen event.
 */
static void gdth_readapp_event(gdth_ha_str *ha,
                               unchar application, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    ulong flags;
    unchar found = FALSE;

    TRACE2(("gdth_readapp_event() app. %d\n", application));
    spin_lock_irqsave(&ha->smp_lock, flags);
    eindex = eoldidx;
    for (;;) {
        e = &ebuffer[eindex];
        if (e->event_source == 0)
            break;
        if ((e->application & application) == 0) {
            /* first event this application has not consumed yet */
            e->application |= application;
            found = TRUE;
            break;
        }
        if (eindex == elastidx)
            break;
        if (++eindex == MAX_EVENTS)
            eindex = 0;
    }
    if (found)
        memcpy(estr, e, sizeof(gdth_evt_str));
    else
        estr->event_source = 0;
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

/* gdth_clear_events() - reset the event ring to the empty state. */
static void gdth_clear_events(void)
{
    TRACE(("gdth_clear_events()"));

    eoldidx = elastidx = 0;
    ebuffer[0].event_source = 0;
}


/* SCSI interface functions */

/*
 * gdth_interrupt() - interrupt service routine.  Reads status/info from the
 * controller (register layout depends on ha->type), acknowledges the
 * interrupt, then dispatches: async events, service-initialization errors,
 * or completion of the command at index IStatus.  Also entered via polling
 * from gdth_wait() (gdth_polling/gdth_from_wait).
 */
static irqreturn_t gdth_interrupt(int irq,void *dev_id)
{
    gdth_ha_str *ha2 = (gdth_ha_str *)dev_id;
    register gdth_ha_str *ha;
    gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt2_dpram_str __iomem *dp2_ptr;
    Scsi_Cmnd *scp;
    int hanum, rval, i;
    unchar IStatus;
    ushort Service;
    ulong flags = 0;
#ifdef INT_COAL
    int coalesced = FALSE;
    int next = FALSE;
    gdth_coal_status *pcs = NULL;
    int act_int_coal = 0;
#endif

    TRACE(("gdth_interrupt() IRQ %d\n",irq));

    /* if polling and not from gdth_wait() -> return */
    if (gdth_polling) {
        if (!gdth_from_wait) {
            return IRQ_HANDLED;
        }
    }

    if (!gdth_polling)
        spin_lock_irqsave(&ha2->smp_lock, flags);
    wait_index = 0;

    /* search controller */
    if ((hanum = gdth_get_status(&IStatus,irq)) == -1) {
        /* spurious interrupt */
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha2->smp_lock, flags);
        return IRQ_HANDLED;
    }
    ha = HADATA(gdth_ctr_tab[hanum]);

#ifdef GDTH_STATISTICS
    ++act_ints;
#endif

#ifdef INT_COAL
    /* See if the fw is returning coalesced status */
    if (IStatus == COALINDEX) {
        /* Coalesced status. Setup the initial status
           buffer pointer and flags */
        pcs = ha->coal_stat;
        coalesced = TRUE;
        next = TRUE;
    }

    do {
        if (coalesced) {
            /* For coalesced requests all status
               information is found in the status buffer */
            IStatus = (unchar)(pcs->status & 0xff);
        }
#endif

        /* Per controller type: fetch status/info/service registers and
           acknowledge the interrupt.  Bit 7 of IStatus flags an error;
           it is stripped before IStatus is used as a command index. */
        if (ha->type == GDT_EISA) {
            if (IStatus & 0x80) {                       /* error flag */
                IStatus &= ~0x80;
                ha->status = inw(ha->bmic + MAILBOXREG+8);
                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
            } else                                      /* no error */
                ha->status = S_OK;
            ha->info = inl(ha->bmic + MAILBOXREG+12);
            ha->service = inw(ha->bmic + MAILBOXREG+10);
            ha->info2 = inl(ha->bmic + MAILBOXREG+4);

            outb(0xff, ha->bmic + EDOORREG);    /* acknowledge interrupt */
            outb(0x00, ha->bmic + SEMA1REG);    /* reset status semaphore */
        } else if (ha->type == GDT_ISA) {
            dp2_ptr = ha->brd;
            if (IStatus & 0x80) {                       /* error flag */
                IStatus &= ~0x80;
                ha->status = gdth_readw(&dp2_ptr->u.ic.Status);
                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
            } else                                      /* no error */
                ha->status = S_OK;
            ha->info = gdth_readl(&dp2_ptr->u.ic.Info[0]);
            ha->service = gdth_readw(&dp2_ptr->u.ic.Service);
            ha->info2 = gdth_readl(&dp2_ptr->u.ic.Info[1]);

            gdth_writeb(0xff, &dp2_ptr->io.irqdel);     /* acknowledge interrupt */
            gdth_writeb(0, &dp2_ptr->u.ic.Cmd_Index);   /* reset command index */
            gdth_writeb(0, &dp2_ptr->io.Sema1);         /* reset status semaphore */
        } else if (ha->type == GDT_PCI) {
            dp6_ptr = ha->brd;
            if (IStatus & 0x80) {                       /* error flag */
                IStatus &= ~0x80;
                ha->status = gdth_readw(&dp6_ptr->u.ic.Status);
                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
            } else                                      /* no error */
                ha->status = S_OK;
            ha->info = gdth_readl(&dp6_ptr->u.ic.Info[0]);
            ha->service = gdth_readw(&dp6_ptr->u.ic.Service);
            ha->info2 = gdth_readl(&dp6_ptr->u.ic.Info[1]);

            gdth_writeb(0xff, &dp6_ptr->io.irqdel);     /* acknowledge interrupt */
            gdth_writeb(0, &dp6_ptr->u.ic.Cmd_Index);   /* reset command index */
            gdth_writeb(0, &dp6_ptr->io.Sema1);         /* reset status semaphore */
        } else if (ha->type == GDT_PCINEW) {
            if (IStatus & 0x80) {                       /* error flag */
                IStatus &= ~0x80;
                ha->status = inw(PTR2USHORT(&ha->plx->status));
                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
            } else
                ha->status = S_OK;
            ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
            ha->service = inw(PTR2USHORT(&ha->plx->service));
            ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));

            outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
            outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
        } else if (ha->type == GDT_PCIMPR) {
            dp6m_ptr = ha->brd;
            if (IStatus & 0x80) {                       /* error flag */
                IStatus &= ~0x80;
#ifdef INT_COAL
                if (coalesced)
                    ha->status = pcs->ext_status & 0xffff;
                else
#endif
                    ha->status = gdth_readw(&dp6m_ptr->i960r.status);
                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
            } else                                      /* no error */
                ha->status = S_OK;
#ifdef INT_COAL
            /* get information */
            if (coalesced) {
                ha->info = pcs->info0;
                ha->info2 = pcs->info1;
                ha->service = (pcs->ext_status >> 16) & 0xffff;
            } else
#endif
            {
                ha->info = gdth_readl(&dp6m_ptr->i960r.info[0]);
                ha->service = gdth_readw(&dp6m_ptr->i960r.service);
                ha->info2 = gdth_readl(&dp6m_ptr->i960r.info[1]);
            }
            /* event string */
            if (IStatus == ASYNCINDEX) {
                /* firmware >= x.1a delivers a NUL-terminated event string
                   in DPRAM for non-screen services */
                if (ha->service != SCREENSERVICE &&
                    (ha->fw_vers & 0xff) >= 0x1a) {
                    ha->dvr.severity = gdth_readb
                        (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
                    for (i = 0; i < 256; ++i) {
                        ha->dvr.event_string[i] = gdth_readb
                            (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
                        if (ha->dvr.event_string[i] == 0)
                            break;
                    }
                }
            }
#ifdef INT_COAL
            /* Make sure that non coalesced interrupts get cleared
               before being handled by gdth_async_event/gdth_sync_event */
            if (!coalesced)
#endif
            {
                gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
                gdth_writeb(0, &dp6m_ptr->i960r.sema1_reg);
            }
        } else {
            TRACE2(("gdth_interrupt() unknown controller type\n"));
            if (!gdth_polling)
                spin_unlock_irqrestore(&ha2->smp_lock, flags);
            return IRQ_HANDLED;
        }

        TRACE(("gdth_interrupt() index %d stat %d info %d\n",
               IStatus,ha->status,ha->info));

        /* publish results for gdth_wait() pollers */
        if (gdth_from_wait) {
            wait_hanum = hanum;
            wait_index = (int)IStatus;
        }

        if (IStatus == ASYNCINDEX) {
            TRACE2(("gdth_interrupt() async. event\n"));
            gdth_async_event(hanum);
            if (!gdth_polling)
                spin_unlock_irqrestore(&ha2->smp_lock, flags);
            gdth_next(hanum);
            return IRQ_HANDLED;
        }

        if (IStatus == SPEZINDEX) {
            TRACE2(("Service unknown or not initialized !\n"));
            ha->dvr.size = sizeof(ha->dvr.eu.driver);
            ha->dvr.eu.driver.ionode = hanum;
            gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
            if (!gdth_polling)
                spin_unlock_irqrestore(&ha2->smp_lock, flags);
            return IRQ_HANDLED;
        }
        /* command indices start at 2 in cmd_tab */
        scp = ha->cmd_tab[IStatus-2].cmnd;
        Service = ha->cmd_tab[IStatus-2].service;
        ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
        if (scp == UNUSED_CMND) {
            TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
            ha->dvr.size = sizeof(ha->dvr.eu.driver);
            ha->dvr.eu.driver.ionode = hanum;
            ha->dvr.eu.driver.index = IStatus;
            gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
            if (!gdth_polling)
                spin_unlock_irqrestore(&ha2->smp_lock, flags);
            return IRQ_HANDLED;
        }
        if (scp == INTERNAL_CMND) {
            TRACE(("gdth_interrupt() answer to internal command\n"));
            if (!gdth_polling)
                spin_unlock_irqrestore(&ha2->smp_lock, flags);
            return IRQ_HANDLED;
        }

        TRACE(("gdth_interrupt() sync. status\n"));
        /* rval 2 -> requeue (retry), rval 1 -> complete to the mid-layer */
        rval = gdth_sync_event(hanum,Service,IStatus,scp);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha2->smp_lock, flags);
        if (rval == 2) {
            gdth_putq(hanum,scp,scp->SCp.this_residual);
        } else if (rval == 1) {
            scp->scsi_done(scp);
        }

#ifdef INT_COAL
        if (coalesced) {
            /* go to the next status in the status buffer */
            ++pcs;
#ifdef GDTH_STATISTICS
            ++act_int_coal;
            if (act_int_coal > max_int_coal) {
                max_int_coal = act_int_coal;
                printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal);
            }
#endif
            /* see if there is another status */
            if (pcs->status == 0)
                /* Stop the coalesce loop */
                next = FALSE;
        }
    } while (next);

    /* coalescing only for new GDT_PCIMPR controllers available */
    if (ha->type == GDT_PCIMPR && coalesced) {
        gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        gdth_writeb(0, &dp6m_ptr->i960r.sema1_reg);
    }
#endif

    gdth_next(hanum);
    return IRQ_HANDLED;
}

/*
 * gdth_sync_event() - handle completion of the command `scp` (command index
 * `index`) for `service`.  Returns 0 = nothing more to do, 1 = call
 * scsi_done(), 2 = requeue the command for retry.
 */
static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp)
{
    register gdth_ha_str *ha;
    gdth_msg_str *msg;
    gdth_cmd_str *cmdp;
    unchar b, t;

    ha = HADATA(gdth_ctr_tab[hanum]);
    cmdp = ha->pccb;
    TRACE(("gdth_sync_event() serv %d status %d\n",
           service,ha->status));

    if (service == SCREENSERVICE) {
        /* console message from the controller: print it, then keep the
           screen-service conversation going */
        msg = ha->pmsg;
        TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
               msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
        if (msg->msg_len > MSGLEN+1)
            msg->msg_len = MSGLEN+1;
        if (msg->msg_len)
            if (!(msg->msg_answer && msg->msg_ext)) {
                msg->msg_text[msg->msg_len] = '\0';
                printk("%s",msg->msg_text);
            }

        if (msg->msg_ext && !msg->msg_answer) {
            /* more message text pending: issue a GDT_READ on the
               screen service to fetch the continuation */
            while
                  (gdth_test_busy(hanum))
                gdth_delay(0);
            cmdp->Service       = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            gdth_get_cmd_index(hanum);
            gdth_set_sema0(hanum);
            cmdp->OpCode        = GDT_READ;
            cmdp->BoardNode     = LOCALBOARD;
            cmdp->u.screen.reserved  = 0;
            cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
            cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(ulong64);
            ha->cmd_cnt = 0;
            gdth_copy_command(hanum);
            gdth_release_event(hanum);
            return 0;
        }

        if (msg->msg_answer && msg->msg_alen) {
            /* default answers (getchar() not possible) */
            if (msg->msg_alen == 1) {
                msg->msg_alen = 0;
                msg->msg_len = 1;
                msg->msg_text[0] = 0;
            } else {
                msg->msg_alen -= 2;
                msg->msg_len = 2;
                msg->msg_text[0] = 1;
                msg->msg_text[1] = 0;
            }
            msg->msg_ext    = 0;
            msg->msg_answer = 0;
            /* send the canned answer back with a screen-service GDT_WRITE */
            while (gdth_test_busy(hanum))
                gdth_delay(0);
            cmdp->Service       = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            gdth_get_cmd_index(hanum);
            gdth_set_sema0(hanum);
            cmdp->OpCode        = GDT_WRITE;
            cmdp->BoardNode     = LOCALBOARD;
            cmdp->u.screen.reserved  = 0;
            cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
            cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(ulong64);
            ha->cmd_cnt = 0;
            gdth_copy_command(hanum);
            gdth_release_event(hanum);
            return 0;
        }
        printk("\n");

    } else {
        b = virt_ctr ? NUMDATA(scp->device->host)->busnum : scp->device->channel;
        t = scp->device->id;
        /* drop the per-target outstanding-I/O count taken at submit time
           (only for real raw commands on non-virtual buses) */
        if (scp->SCp.sent_command == -1 && b != ha->virt_bus) {
            ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
        }
        /* cache or raw service */
        if (ha->status == S_BSY) {
            TRACE2(("Controller busy -> retry !\n"));
            if (scp->SCp.sent_command == GDT_MOUNT)
                scp->SCp.sent_command = GDT_CLUST_INFO;
            /* retry */
            return 2;
        }
        /* undo the DMA mappings set up in gdth_fill_cache_cmd()/
           gdth_fill_raw_cmd(); SCp.Status recorded which kind was used */
        if (scp->SCp.Status == GDTH_MAP_SG)
            pci_unmap_sg(ha->pdev,scp->request_buffer,
                         scp->use_sg,scp->SCp.Message);
        else if (scp->SCp.Status == GDTH_MAP_SINGLE)
            pci_unmap_page(ha->pdev,scp->SCp.dma_handle,
                           scp->request_bufflen,scp->SCp.Message);
        if (scp->SCp.buffer) {
            /* sense buffer mapping: low 32 bits live in SCp.buffer,
               high 32 bits (if any) in host_scribble */
            dma_addr_t addr;
            addr = (dma_addr_t)*(ulong32 *)&scp->SCp.buffer;
            if (scp->host_scribble)
                addr += (dma_addr_t)
                    ((ulong64)(*(ulong32 *)&scp->host_scribble) << 32);
            pci_unmap_page(ha->pdev,addr,16,PCI_DMA_FROMDEVICE);
        }

        if (ha->status == S_OK) {
            scp->SCp.Status = S_OK;
            scp->SCp.Message = ha->info;
            if (scp->SCp.sent_command != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
                        scp->SCp.sent_command));
                /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
                if (scp->SCp.sent_command == GDT_CLUST_INFO) {
                    ha->hdr[t].cluster_type = (unchar)ha->info;
                    if (!(ha->hdr[t].cluster_type &
                        CLUSTER_MOUNTED)) {
                        /* NOT MOUNTED -> MOUNT */
                        scp->SCp.sent_command = GDT_MOUNT;
                        if (ha->hdr[t].cluster_type &
                            CLUSTER_RESERVED) {
                            /* cluster drive RESERVED (on the other node) */
                            scp->SCp.phase = -2;      /* reservation conflict */
                        }
                    } else {
                        scp->SCp.sent_command = -1;
                    }
                } else {
                    if (scp->SCp.sent_command == GDT_MOUNT) {
                        ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    } else if (scp->SCp.sent_command == GDT_UNMOUNT) {
                        ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    }
                    scp->SCp.sent_command = -1;
                }
                /* retry */
                scp->SCp.this_residual = HIGH_PRI;
                return 2;
            } else {
                /* RESERVE/RELEASE ? */
                if (scp->cmnd[0] == RESERVE) {
                    ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
                } else if (scp->cmnd[0] == RELEASE) {
                    ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                }
                scp->result = DID_OK << 16;
                scp->sense_buffer[0] = 0;
            }
        } else {
            /* controller reported an error status */
            scp->SCp.Status = ha->status;
            scp->SCp.Message = ha->info;

            if (scp->SCp.sent_command != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
                        scp->SCp.sent_command, ha->status));
                if (scp->SCp.sent_command == GDT_SCAN_START ||
                    scp->SCp.sent_command == GDT_SCAN_END) {
                    scp->SCp.sent_command = -1;
                    /* retry */
                    scp->SCp.this_residual = HIGH_PRI;
                    return 2;
                }
                /* synthesize a NOT READY check condition for the mid-layer */
                memset((char*)scp->sense_buffer,0,16);
                scp->sense_buffer[0] = 0x70;
                scp->sense_buffer[2] = NOT_READY;
                scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
            } else if (service == CACHESERVICE) {
                if (ha->status == S_CACHE_UNKNOWN &&
                    (ha->hdr[t].cluster_type &
                     CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
                    /* bus reset -> force
GDT_CLUST_INFO */ 3849 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; 3850 } 3851 memset((char*)scp->sense_buffer,0,16); 3852 if (ha->status == (ushort)S_CACHE_RESERV) { 3853 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); 3854 } else { 3855 scp->sense_buffer[0] = 0x70; 3856 scp->sense_buffer[2] = NOT_READY; 3857 scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 3858 } 3859 if (scp->done != gdth_scsi_done) { 3860 ha->dvr.size = sizeof(ha->dvr.eu.sync); 3861 ha->dvr.eu.sync.ionode = hanum; 3862 ha->dvr.eu.sync.service = service; 3863 ha->dvr.eu.sync.status = ha->status; 3864 ha->dvr.eu.sync.info = ha->info; 3865 ha->dvr.eu.sync.hostdrive = t; 3866 if (ha->status >= 0x8000) 3867 gdth_store_event(ha, ES_SYNC, 0, &ha->dvr); 3868 else 3869 gdth_store_event(ha, ES_SYNC, service, &ha->dvr); 3870 } 3871 } else { 3872 /* sense buffer filled from controller firmware (DMA) */ 3873 if (ha->status != S_RAW_SCSI || ha->info >= 0x100) { 3874 scp->result = DID_BAD_TARGET << 16; 3875 } else { 3876 scp->result = (DID_OK << 16) | ha->info; 3877 } 3878 } 3879 } 3880 if (!scp->SCp.have_data_in) 3881 scp->SCp.have_data_in++; 3882 else 3883 return 1; 3884 } 3885 3886 return 0; 3887} 3888 3889static char *async_cache_tab[] = { 3890/* 0*/ "\011\000\002\002\002\004\002\006\004" 3891 "GDT HA %u, service %u, async. status %u/%lu unknown", 3892/* 1*/ "\011\000\002\002\002\004\002\006\004" 3893 "GDT HA %u, service %u, async. status %u/%lu unknown", 3894/* 2*/ "\005\000\002\006\004" 3895 "GDT HA %u, Host Drive %lu not ready", 3896/* 3*/ "\005\000\002\006\004" 3897 "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. 
Drive may crash in the future and should be replaced", 3898/* 4*/ "\005\000\002\006\004" 3899 "GDT HA %u, mirror update on Host Drive %lu failed", 3900/* 5*/ "\005\000\002\006\004" 3901 "GDT HA %u, Mirror Drive %lu failed", 3902/* 6*/ "\005\000\002\006\004" 3903 "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced", 3904/* 7*/ "\005\000\002\006\004" 3905 "GDT HA %u, Host Drive %lu write protected", 3906/* 8*/ "\005\000\002\006\004" 3907 "GDT HA %u, media changed in Host Drive %lu", 3908/* 9*/ "\005\000\002\006\004" 3909 "GDT HA %u, Host Drive %lu is offline", 3910/*10*/ "\005\000\002\006\004" 3911 "GDT HA %u, media change of Mirror Drive %lu", 3912/*11*/ "\005\000\002\006\004" 3913 "GDT HA %u, Mirror Drive %lu is write protected", 3914/*12*/ "\005\000\002\006\004" 3915 "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!", 3916/*13*/ "\007\000\002\006\002\010\002" 3917 "GDT HA %u, Array Drive %u: Cache Drive %u failed", 3918/*14*/ "\005\000\002\006\002" 3919 "GDT HA %u, Array Drive %u: FAIL state entered", 3920/*15*/ "\005\000\002\006\002" 3921 "GDT HA %u, Array Drive %u: error", 3922/*16*/ "\007\000\002\006\002\010\002" 3923 "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u", 3924/*17*/ "\005\000\002\006\002" 3925 "GDT HA %u, Array Drive %u: parity build failed", 3926/*18*/ "\005\000\002\006\002" 3927 "GDT HA %u, Array Drive %u: drive rebuild failed", 3928/*19*/ "\005\000\002\010\002" 3929 "GDT HA %u, Test of Hot Fix %u failed", 3930/*20*/ "\005\000\002\006\002" 3931 "GDT HA %u, Array Drive %u: drive build finished successfully", 3932/*21*/ "\005\000\002\006\002" 3933 "GDT HA %u, Array Drive %u: drive rebuild finished successfully", 3934/*22*/ "\007\000\002\006\002\010\002" 3935 "GDT HA %u, Array Drive %u: Hot Fix %u activated", 3936/*23*/ "\005\000\002\006\002" 3937 "GDT HA %u, Host Drive %u: processing of i/o aborted due 
to serious drive error", 3938/*24*/ "\005\000\002\010\002" 3939 "GDT HA %u, mirror update on Cache Drive %u completed", 3940/*25*/ "\005\000\002\010\002" 3941 "GDT HA %u, mirror update on Cache Drive %lu failed", 3942/*26*/ "\005\000\002\006\002" 3943 "GDT HA %u, Array Drive %u: drive rebuild started", 3944/*27*/ "\005\000\002\012\001" 3945 "GDT HA %u, Fault bus %u: SHELF OK detected", 3946/*28*/ "\005\000\002\012\001" 3947 "GDT HA %u, Fault bus %u: SHELF not OK detected", 3948/*29*/ "\007\000\002\012\001\013\001" 3949 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started", 3950/*30*/ "\007\000\002\012\001\013\001" 3951 "GDT HA %u, Fault bus %u, ID %u: new disk detected", 3952/*31*/ "\007\000\002\012\001\013\001" 3953 "GDT HA %u, Fault bus %u, ID %u: old disk detected", 3954/*32*/ "\007\000\002\012\001\013\001" 3955 "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid", 3956/*33*/ "\007\000\002\012\001\013\001" 3957 "GDT HA %u, Fault bus %u, ID %u: invalid device detected", 3958/*34*/ "\011\000\002\012\001\013\001\006\004" 3959 "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)", 3960/*35*/ "\007\000\002\012\001\013\001" 3961 "GDT HA %u, Fault bus %u, ID %u: disk write protected", 3962/*36*/ "\007\000\002\012\001\013\001" 3963 "GDT HA %u, Fault bus %u, ID %u: disk not available", 3964/*37*/ "\007\000\002\012\001\006\004" 3965 "GDT HA %u, Fault bus %u: swap detected (%lu)", 3966/*38*/ "\007\000\002\012\001\013\001" 3967 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully", 3968/*39*/ "\007\000\002\012\001\013\001" 3969 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug", 3970/*40*/ "\007\000\002\012\001\013\001" 3971 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted", 3972/*41*/ "\007\000\002\012\001\013\001" 3973 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started", 3974/*42*/ "\005\000\002\006\002" 3975 "GDT HA %u, Array Drive %u: drive build started", 
3976/*43*/ "\003\000\002" 3977 "GDT HA %u, DRAM parity error detected", 3978/*44*/ "\005\000\002\006\002" 3979 "GDT HA %u, Mirror Drive %u: update started", 3980/*45*/ "\007\000\002\006\002\010\002" 3981 "GDT HA %u, Mirror Drive %u: Hot Fix %u activated", 3982/*46*/ "\005\000\002\006\002" 3983 "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available", 3984/*47*/ "\005\000\002\006\002" 3985 "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available", 3986/*48*/ "\005\000\002\006\002" 3987 "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available", 3988/*49*/ "\005\000\002\006\002" 3989 "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available", 3990/*50*/ "\007\000\002\012\001\013\001" 3991 "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received", 3992/*51*/ "\005\000\002\006\002" 3993 "GDT HA %u, Array Drive %u: expand started", 3994/*52*/ "\005\000\002\006\002" 3995 "GDT HA %u, Array Drive %u: expand finished successfully", 3996/*53*/ "\005\000\002\006\002" 3997 "GDT HA %u, Array Drive %u: expand failed", 3998/*54*/ "\003\000\002" 3999 "GDT HA %u, CPU temperature critical", 4000/*55*/ "\003\000\002" 4001 "GDT HA %u, CPU temperature OK", 4002/*56*/ "\005\000\002\006\004" 4003 "GDT HA %u, Host drive %lu created", 4004/*57*/ "\005\000\002\006\002" 4005 "GDT HA %u, Array Drive %u: expand restarted", 4006/*58*/ "\005\000\002\006\002" 4007 "GDT HA %u, Array Drive %u: expand stopped", 4008/*59*/ "\005\000\002\010\002" 4009 "GDT HA %u, Mirror Drive %u: drive build quited", 4010/*60*/ "\005\000\002\006\002" 4011 "GDT HA %u, Array Drive %u: parity build quited", 4012/*61*/ "\005\000\002\006\002" 4013 "GDT HA %u, Array Drive %u: drive rebuild quited", 4014/*62*/ "\005\000\002\006\002" 4015 "GDT HA %u, Array Drive %u: parity verify started", 4016/*63*/ "\005\000\002\006\002" 4017 "GDT HA %u, Array Drive %u: parity verify done", 4018/*64*/ "\005\000\002\006\002" 4019 "GDT HA %u, Array Drive %u: parity verify failed", 4020/*65*/ 
"\005\000\002\006\002" 4021 "GDT HA %u, Array Drive %u: parity error detected", 4022/*66*/ "\005\000\002\006\002" 4023 "GDT HA %u, Array Drive %u: parity verify quited", 4024/*67*/ "\005\000\002\006\002" 4025 "GDT HA %u, Host Drive %u reserved", 4026/*68*/ "\005\000\002\006\002" 4027 "GDT HA %u, Host Drive %u mounted and released", 4028/*69*/ "\005\000\002\006\002" 4029 "GDT HA %u, Host Drive %u released", 4030/*70*/ "\003\000\002" 4031 "GDT HA %u, DRAM error detected and corrected with ECC", 4032/*71*/ "\003\000\002" 4033 "GDT HA %u, Uncorrectable DRAM error detected with ECC", 4034/*72*/ "\011\000\002\012\001\013\001\014\001" 4035 "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block", 4036/*73*/ "\005\000\002\006\002" 4037 "GDT HA %u, Host drive %u resetted locally", 4038/*74*/ "\005\000\002\006\002" 4039 "GDT HA %u, Host drive %u resetted remotely", 4040/*75*/ "\003\000\002" 4041 "GDT HA %u, async. status 75 unknown", 4042}; 4043 4044 4045static int gdth_async_event(int hanum) 4046{ 4047 gdth_ha_str *ha; 4048 gdth_cmd_str *cmdp; 4049 int cmd_index; 4050 4051 ha = HADATA(gdth_ctr_tab[hanum]); 4052 cmdp= ha->pccb; 4053 TRACE2(("gdth_async_event() ha %d serv %d\n", 4054 hanum,ha->service)); 4055 4056 if (ha->service == SCREENSERVICE) { 4057 if (ha->status == MSG_REQUEST) { 4058 while (gdth_test_busy(hanum)) 4059 gdth_delay(0); 4060 cmdp->Service = SCREENSERVICE; 4061 cmdp->RequestBuffer = SCREEN_CMND; 4062 cmd_index = gdth_get_cmd_index(hanum); 4063 gdth_set_sema0(hanum); 4064 cmdp->OpCode = GDT_READ; 4065 cmdp->BoardNode = LOCALBOARD; 4066 cmdp->u.screen.reserved = 0; 4067 cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE; 4068 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; 4069 ha->cmd_offs_dpmem = 0; 4070 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) 4071 + sizeof(ulong64); 4072 ha->cmd_cnt = 0; 4073 gdth_copy_command(hanum); 4074 if (ha->type == GDT_EISA) 4075 printk("[EISA slot %d] ",(ushort)ha->brd_phys); 4076 else if (ha->type == 
GDT_ISA) 4077 printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys); 4078 else 4079 printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8), 4080 (ushort)((ha->brd_phys>>3)&0x1f)); 4081 gdth_release_event(hanum); 4082 } 4083 4084 } else { 4085 if (ha->type == GDT_PCIMPR && 4086 (ha->fw_vers & 0xff) >= 0x1a) { 4087 ha->dvr.size = 0; 4088 ha->dvr.eu.async.ionode = hanum; 4089 ha->dvr.eu.async.status = ha->status; 4090 /* severity and event_string already set! */ 4091 } else { 4092 ha->dvr.size = sizeof(ha->dvr.eu.async); 4093 ha->dvr.eu.async.ionode = hanum; 4094 ha->dvr.eu.async.service = ha->service; 4095 ha->dvr.eu.async.status = ha->status; 4096 ha->dvr.eu.async.info = ha->info; 4097 *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2; 4098 } 4099 gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); 4100 gdth_log_event( &ha->dvr, NULL ); 4101 4102 /* new host drive from expand? */ 4103 if (ha->service == CACHESERVICE && ha->status == 56) { 4104 TRACE2(("gdth_async_event(): new host drive %d created\n", 4105 (ushort)ha->info)); 4106 /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */ 4107 } 4108 } 4109 return 1; 4110} 4111 4112static void gdth_log_event(gdth_evt_data *dvr, char *buffer) 4113{ 4114 gdth_stackframe stack; 4115 char *f = NULL; 4116 int i,j; 4117 4118 TRACE2(("gdth_log_event()\n")); 4119 if (dvr->size == 0) { 4120 if (buffer == NULL) { 4121 printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string); 4122 } else { 4123 sprintf(buffer,"Adapter %d: %s\n", 4124 dvr->eu.async.ionode,dvr->event_string); 4125 } 4126 } else if (dvr->eu.async.service == CACHESERVICE && 4127 INDEX_OK(dvr->eu.async.status, async_cache_tab)) { 4128 TRACE2(("GDT: Async. 
event cache service, event no.: %d\n", 4129 dvr->eu.async.status)); 4130 4131 f = async_cache_tab[dvr->eu.async.status]; 4132 4133 /* i: parameter to push, j: stack element to fill */ 4134 for (j=0,i=1; i < f[0]; i+=2) { 4135 switch (f[i+1]) { 4136 case 4: 4137 stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]]; 4138 break; 4139 case 2: 4140 stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]]; 4141 break; 4142 case 1: 4143 stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]]; 4144 break; 4145 default: 4146 break; 4147 } 4148 } 4149 4150 if (buffer == NULL) { 4151 printk(&f[(int)f[0]],stack); 4152 printk("\n"); 4153 } else { 4154 sprintf(buffer,&f[(int)f[0]],stack); 4155 } 4156 4157 } else { 4158 if (buffer == NULL) { 4159 printk("GDT HA %u, Unknown async. event service %d event no. %d\n", 4160 dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status); 4161 } else { 4162 sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d", 4163 dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status); 4164 } 4165 } 4166} 4167 4168#ifdef GDTH_STATISTICS 4169static void gdth_timeout(ulong data) 4170{ 4171 ulong32 i; 4172 Scsi_Cmnd *nscp; 4173 gdth_ha_str *ha; 4174 ulong flags; 4175 int hanum = 0; 4176 4177 ha = HADATA(gdth_ctr_tab[hanum]); 4178 spin_lock_irqsave(&ha->smp_lock, flags); 4179 4180 for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i) 4181 if (ha->cmd_tab[i].cmnd != UNUSED_CMND) 4182 ++act_stats; 4183 4184 for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr) 4185 ++act_rq; 4186 4187 TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n", 4188 act_ints, act_ios, act_stats, act_rq)); 4189 act_ints = act_ios = 0; 4190 4191 gdth_timer.expires = jiffies + 30 * HZ; 4192 add_timer(&gdth_timer); 4193 spin_unlock_irqrestore(&ha->smp_lock, flags); 4194} 4195#endif 4196 4197static void __init internal_setup(char *str,int *ints) 4198{ 4199 int i, argc; 4200 char *cur_str, *argv; 4201 4202 TRACE2(("internal_setup() str %s ints[0] 
%d\n", 4203 str ? str:"NULL", ints ? ints[0]:0)); 4204 4205 /* read irq[] from ints[] */ 4206 if (ints) { 4207 argc = ints[0]; 4208 if (argc > 0) { 4209 if (argc > MAXHA) 4210 argc = MAXHA; 4211 for (i = 0; i < argc; ++i) 4212 irq[i] = ints[i+1]; 4213 } 4214 } 4215 4216 /* analyse string */ 4217 argv = str; 4218 while (argv && (cur_str = strchr(argv, ':'))) { 4219 int val = 0, c = *++cur_str; 4220 4221 if (c == 'n' || c == 'N') 4222 val = 0; 4223 else if (c == 'y' || c == 'Y') 4224 val = 1; 4225 else 4226 val = (int)simple_strtoul(cur_str, NULL, 0); 4227 4228 if (!strncmp(argv, "disable:", 8)) 4229 disable = val; 4230 else if (!strncmp(argv, "reserve_mode:", 13)) 4231 reserve_mode = val; 4232 else if (!strncmp(argv, "reverse_scan:", 13)) 4233 reverse_scan = val; 4234 else if (!strncmp(argv, "hdr_channel:", 12)) 4235 hdr_channel = val; 4236 else if (!strncmp(argv, "max_ids:", 8)) 4237 max_ids = val; 4238 else if (!strncmp(argv, "rescan:", 7)) 4239 rescan = val; 4240 else if (!strncmp(argv, "virt_ctr:", 9)) 4241 virt_ctr = val; 4242 else if (!strncmp(argv, "shared_access:", 14)) 4243 shared_access = val; 4244 else if (!strncmp(argv, "probe_eisa_isa:", 15)) 4245 probe_eisa_isa = val; 4246 else if (!strncmp(argv, "reserve_list:", 13)) { 4247 reserve_list[0] = val; 4248 for (i = 1; i < MAX_RES_ARGS; i++) { 4249 cur_str = strchr(cur_str, ','); 4250 if (!cur_str) 4251 break; 4252 if (!isdigit((int)*++cur_str)) { 4253 --cur_str; 4254 break; 4255 } 4256 reserve_list[i] = 4257 (int)simple_strtoul(cur_str, NULL, 0); 4258 } 4259 if (!cur_str) 4260 break; 4261 argv = ++cur_str; 4262 continue; 4263 } 4264 4265 if ((argv = strchr(argv, ','))) 4266 ++argv; 4267 } 4268} 4269 4270int __init option_setup(char *str) 4271{ 4272 int ints[MAXHA]; 4273 char *cur = str; 4274 int i = 1; 4275 4276 TRACE2(("option_setup() str %s\n", str ? 
str:"NULL"));

    /* fills ints[1..MAXHA] with leading numeric arguments; ints[0] = count */
    while (cur && isdigit(*cur) && i <= MAXHA) {
        ints[i++] = simple_strtoul(cur, NULL, 0);
        if ((cur = strchr(cur, ',')) != NULL) cur++;
    }

    ints[0] = i - 1;
    internal_setup(cur, ints);
    return 1;
}

/* Probe and initialize all GDT controllers (ISA/EISA only when
 * probe_eisa_isa is set, PCI always).  For every controller found: register
 * a Scsi_Host, grab IRQ (and DMA channel for ISA), allocate the DMA-coherent
 * scratch and message buffers, scan for drives, and optionally register one
 * extra virtual Scsi_Host per physical SCSI channel (virt_ctr).  On any
 * failure the per-controller resources are unwound and probing continues
 * with the next candidate.  Returns the number of registered (virtual)
 * hosts. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int __init gdth_detect(struct scsi_host_template *shtp)
#else
static int __init gdth_detect(Scsi_Host_Template *shtp)
#endif
{
    struct Scsi_Host *shp;
    gdth_pci_str pcistr[MAXHA];
    gdth_ha_str *ha;
    ulong32 isa_bios;
    ushort eisa_slot;
    int i,hanum,cnt,ctr,err;
    unchar b;


#ifdef DEBUG_GDTH
    printk("GDT: This driver contains debugging information !! Trace level = %d\n",
        DebugState);
    printk(" Destination of debugging information: ");
#ifdef __SERIAL__
#ifdef __COM2__
    printk("Serial port COM2\n");
#else
    printk("Serial port COM1\n");
#endif
#else
    printk("Console\n");
#endif
    gdth_delay(3000);
#endif

    TRACE(("gdth_detect()\n"));

    if (disable) {
        printk("GDT-HA: Controller driver disabled from command line !\n");
        return 0;
    }

    printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",GDTH_VERSION_STR);
    /* initializations */
    gdth_polling = TRUE; b = 0;
    gdth_clear_events();

    /* As default we do not probe for EISA or ISA controllers */
    if (probe_eisa_isa) {
        /* scanning for controllers, at first: ISA controller */
        for (isa_bios=0xc8000UL; isa_bios<=0xd8000UL; isa_bios+=0x8000UL) {
            dma_addr_t scratch_dma_handle;
            scratch_dma_handle = 0;

            if (gdth_ctr_count >= MAXHA)
                break;
            if (gdth_search_isa(isa_bios)) { /* controller found */
                shp = scsi_register(shtp,sizeof(gdth_ext_str));
                if (shp == NULL)
                    continue;

                ha = HADATA(shp);
                if (!gdth_init_isa(isa_bios,ha)) {
                    scsi_unregister(shp);
                    continue;
                }
#ifdef __ia64__
                break;
#else
                /* controller found and initialized */
                printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
                    isa_bios,ha->irq,ha->drq);

                if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
                    printk("GDT-ISA: Unable to allocate IRQ\n");
                    scsi_unregister(shp);
                    continue;
                }
                if (request_dma(ha->drq,"gdth")) {
                    printk("GDT-ISA: Unable to allocate DMA channel\n");
                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                set_dma_mode(ha->drq,DMA_MODE_CASCADE);
                enable_dma(ha->drq);
                shp->unchecked_isa_dma = 1;
                shp->irq = ha->irq;
                shp->dma_channel = ha->drq;
                hanum = gdth_ctr_count;
                gdth_ctr_tab[gdth_ctr_count++] = shp;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum= 0;

                ha->pccb = CMDDATA(shp);
                ha->ccb_phys = 0L;
                /* ha->pdev is NULL here: pci_alloc_consistent() with a NULL
                   device falls back to an ISA-suitable allocation */
                ha->pdev = NULL;
                ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                                    &scratch_dma_handle);
                ha->scratch_phys = scratch_dma_handle;
                ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                                &scratch_dma_handle);
                ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
                ha->coal_stat = (gdth_coal_status *)
                    pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                         MAXOFFSETS, &scratch_dma_handle);
                ha->coal_stat_phys = scratch_dma_handle;
#endif

                ha->scratch_busy = FALSE;
                ha->req_first = NULL;
                ha->tid_cnt = MAX_HDRIVES;
                if (max_ids > 0 && max_ids < ha->tid_cnt)
                    ha->tid_cnt = max_ids;
                for (i=0; i<GDTH_MAXCMDS; ++i)
                    ha->cmd_tab[i].cmnd = UNUSED_CMND;
                ha->scan_mode = rescan ? 0x10 : 0;

                if (ha->pscratch == NULL || ha->pmsg == NULL ||
                    !gdth_search_drives(hanum)) {
                    /* unwind everything acquired above for this controller */
                    printk("GDT-ISA: Error during device scan\n");
                    --gdth_ctr_count;
                    --gdth_ctr_vcount;

#ifdef INT_COAL
                    if (ha->coal_stat)
                        pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                            MAXOFFSETS, ha->coal_stat,
                                            ha->coal_stat_phys);
#endif
                    if (ha->pscratch)
                        pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                            ha->pscratch, ha->scratch_phys);
                    if (ha->pmsg)
                        pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                            ha->pmsg, ha->msg_phys);

                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                    hdr_channel = ha->bus_cnt;
                ha->virt_bus = hdr_channel;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < \
    KERNEL_VERSION(2,6,0)
                shp->highmem_io = 0;
#endif
                /* 16-byte CDBs only if all three services support 64 bit */
                if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
                    shp->max_cmd_len = 16;

                shp->max_id = ha->tid_cnt;
                shp->max_lun = MAXLUN;
                shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
                if (virt_ctr) {
                    virt_ctr = 1;
                    /* register addit. SCSI channels as virtual controllers */
                    for (b = 1; b < ha->bus_cnt + 1; ++b) {
                        shp = scsi_register(shtp,sizeof(gdth_num_str));
                        shp->unchecked_isa_dma = 1;
                        shp->irq = ha->irq;
                        shp->dma_channel = ha->drq;
                        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                        NUMDATA(shp)->hanum = (ushort)hanum;
                        NUMDATA(shp)->busnum = b;
                    }
                }

                spin_lock_init(&ha->smp_lock);
                gdth_enable_int(hanum);
#endif /* !__ia64__ */
            }
        }

        /* scanning for EISA controllers */
        for (eisa_slot=0x1000; eisa_slot<=0x8000; eisa_slot+=0x1000) {
            dma_addr_t scratch_dma_handle;
            scratch_dma_handle = 0;

            if (gdth_ctr_count >= MAXHA)
                break;
            if (gdth_search_eisa(eisa_slot)) { /* controller found */
                shp = scsi_register(shtp,sizeof(gdth_ext_str));
                if (shp == NULL)
                    continue;

                ha = HADATA(shp);
                if (!gdth_init_eisa(eisa_slot,ha)) {
                    scsi_unregister(shp);
                    continue;
                }
                /* controller found and initialized */
                printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
                    eisa_slot>>12,ha->irq);

                if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
                    printk("GDT-EISA: Unable to allocate IRQ\n");
                    scsi_unregister(shp);
                    continue;
                }
                shp->unchecked_isa_dma = 0;
                shp->irq = ha->irq;
                shp->dma_channel = 0xff;
                hanum = gdth_ctr_count;
                gdth_ctr_tab[gdth_ctr_count++] = shp;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum= 0;
                TRACE2(("EISA detect Bus 0: hanum %d\n",
                        NUMDATA(shp)->hanum));

                ha->pccb = CMDDATA(shp);
                ha->ccb_phys = 0L;

                ha->pdev = NULL;
                ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                                    &scratch_dma_handle);
                ha->scratch_phys = scratch_dma_handle;
                ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                                &scratch_dma_handle);
                ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
                ha->coal_stat = (gdth_coal_status *)
                    pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                         MAXOFFSETS, &scratch_dma_handle);
                ha->coal_stat_phys = scratch_dma_handle;
#endif
                /* EISA (unlike ISA/PCI here) streams the command block via
                   a mapped buffer instead of DPMEM */
                ha->ccb_phys =
                    pci_map_single(ha->pdev,ha->pccb,
                                   sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
                ha->scratch_busy = FALSE;
                ha->req_first = NULL;
                ha->tid_cnt = MAX_HDRIVES;
                if (max_ids > 0 && max_ids < ha->tid_cnt)
                    ha->tid_cnt = max_ids;
                for (i=0; i<GDTH_MAXCMDS; ++i)
                    ha->cmd_tab[i].cmnd = UNUSED_CMND;
                ha->scan_mode = rescan ? 0x10 : 0;

                if (ha->pscratch == NULL || ha->pmsg == NULL ||
                    !gdth_search_drives(hanum)) {
                    printk("GDT-EISA: Error during device scan\n");
                    --gdth_ctr_count;
                    --gdth_ctr_vcount;
#ifdef INT_COAL
                    if (ha->coal_stat)
                        pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                            MAXOFFSETS, ha->coal_stat,
                                            ha->coal_stat_phys);
#endif
                    if (ha->pscratch)
                        pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                            ha->pscratch, ha->scratch_phys);
                    if (ha->pmsg)
                        pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                            ha->pmsg, ha->msg_phys);
                    if (ha->ccb_phys)
                        pci_unmap_single(ha->pdev,ha->ccb_phys,
                                         sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                    hdr_channel = ha->bus_cnt;
                ha->virt_bus = hdr_channel;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < \
    KERNEL_VERSION(2,6,0)
                shp->highmem_io = 0;
#endif
                if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
                    shp->max_cmd_len = 16;

                shp->max_id = ha->tid_cnt;
                shp->max_lun = MAXLUN;
                shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
                if (virt_ctr) {
                    virt_ctr = 1;
                    /* register addit. SCSI channels as virtual controllers */
                    for (b = 1; b < ha->bus_cnt + 1; ++b) {
                        shp = scsi_register(shtp,sizeof(gdth_num_str));
                        shp->unchecked_isa_dma = 0;
                        shp->irq = ha->irq;
                        shp->dma_channel = 0xff;
                        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                        NUMDATA(shp)->hanum = (ushort)hanum;
                        NUMDATA(shp)->busnum = b;
                    }
                }

                spin_lock_init(&ha->smp_lock);
                gdth_enable_int(hanum);
            }
        }
    }

    /* scanning for PCI controllers */
    cnt = gdth_search_pci(pcistr);
    printk("GDT-HA: Found %d PCI Storage RAID Controllers\n",cnt);
    gdth_sort_pci(pcistr,cnt);
    for (ctr = 0; ctr < cnt; ++ctr) {
        dma_addr_t scratch_dma_handle;
        scratch_dma_handle = 0;

        if (gdth_ctr_count >= MAXHA)
            break;
        shp = scsi_register(shtp,sizeof(gdth_ext_str));
        if (shp == NULL)
            continue;

        ha = HADATA(shp);
        if (!gdth_init_pci(&pcistr[ctr],ha)) {
            scsi_unregister(shp);
            continue;
        }
        /* controller found and initialized */
        printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
               pcistr[ctr].bus,PCI_SLOT(pcistr[ctr].device_fn),ha->irq);

        if (request_irq(ha->irq, gdth_interrupt,
                        IRQF_DISABLED|IRQF_SHARED, "gdth", ha))
        {
            printk("GDT-PCI: Unable to allocate IRQ\n");
            scsi_unregister(shp);
            continue;
        }
        shp->unchecked_isa_dma = 0;
        shp->irq = ha->irq;
        shp->dma_channel = 0xff;
        hanum = gdth_ctr_count;
        gdth_ctr_tab[gdth_ctr_count++] = shp;
        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

        NUMDATA(shp)->hanum = (ushort)hanum;
        NUMDATA(shp)->busnum= 0;

        ha->pccb = CMDDATA(shp);
        ha->ccb_phys = 0L;

        ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                            &scratch_dma_handle);
        ha->scratch_phys = scratch_dma_handle;
        ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                        &scratch_dma_handle);
        ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
        ha->coal_stat = (gdth_coal_status *)
            pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                 MAXOFFSETS, &scratch_dma_handle);
        ha->coal_stat_phys = scratch_dma_handle;
#endif
        ha->scratch_busy = FALSE;
        ha->req_first = NULL;
        /* device IDs >= 0x200 support the full target range */
        ha->tid_cnt = pcistr[ctr].device_id >= 0x200 ? MAXID : MAX_HDRIVES;
        if (max_ids > 0 && max_ids < ha->tid_cnt)
            ha->tid_cnt = max_ids;
        for (i=0; i<GDTH_MAXCMDS; ++i)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
        ha->scan_mode = rescan ? 0x10 : 0;

        err = FALSE;
        if (ha->pscratch == NULL || ha->pmsg == NULL ||
            !gdth_search_drives(hanum)) {
            err = TRUE;
        } else {
            if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                hdr_channel = ha->bus_cnt;
            ha->virt_bus = hdr_channel;


#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
            scsi_set_pci_device(shp, pcistr[ctr].pdev);
#endif
            if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat &GDT_64BIT)||
                /* 64-bit DMA only supported from FW >= x.43 */
                (!ha->dma64_support)) {
                if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
                    printk(KERN_WARNING "GDT-PCI %d: Unable to set 32-bit DMA\n", hanum);
                    err = TRUE;
                }
            } else {
                shp->max_cmd_len = 16;
                /* try 64-bit first, fall back to 32-bit */
                if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) {
                    printk("GDT-PCI %d: 64-bit DMA enabled\n", hanum);
                } else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
                    printk(KERN_WARNING "GDT-PCI %d: Unable to set 64/32-bit DMA\n", hanum);
                    err = TRUE;
                }
            }
        }

        if (err) {
            printk("GDT-PCI %d: Error during device scan\n", hanum);
            --gdth_ctr_count;
            --gdth_ctr_vcount;
#ifdef INT_COAL
            if (ha->coal_stat)
                pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                    MAXOFFSETS, ha->coal_stat,
                                    ha->coal_stat_phys);
#endif
            if (ha->pscratch)
                pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                    ha->pscratch, ha->scratch_phys);
            if (ha->pmsg)
                pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                    ha->pmsg, ha->msg_phys);
            free_irq(ha->irq,ha);
            scsi_unregister(shp);
            continue;
        }

        shp->max_id = ha->tid_cnt;
        shp->max_lun = MAXLUN;
        shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
        if (virt_ctr) {
            virt_ctr = 1;
            /* register addit. SCSI channels as virtual controllers */
            for (b = 1; b < ha->bus_cnt + 1; ++b) {
                shp = scsi_register(shtp,sizeof(gdth_num_str));
                shp->unchecked_isa_dma = 0;
                shp->irq = ha->irq;
                shp->dma_channel = 0xff;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum = b;
            }
        }

        spin_lock_init(&ha->smp_lock);
        gdth_enable_int(hanum);
    }

    TRACE2(("gdth_detect() %d controller detected\n",gdth_ctr_count));
    if (gdth_ctr_count > 0) {
#ifdef GDTH_STATISTICS
        TRACE2(("gdth_detect(): Initializing timer !\n"));
        init_timer(&gdth_timer);
        gdth_timer.expires = jiffies + HZ;
        gdth_timer.data = 0L;
        gdth_timer.function = gdth_timeout;
        add_timer(&gdth_timer);
#endif
        /* char device for the ioctl interface + shutdown notifier */
        major = register_chrdev(0,"gdth",&gdth_fops);
        notifier_disabled = 0;
        register_reboot_notifier(&gdth_notifier);
    }
    gdth_polling = FALSE;
    return gdth_ctr_vcount;
}

/* Release one registered Scsi_Host.  Real controllers (busnum == 0) flush
 * their caches and free IRQ/DMA/coherent buffers; virtual channel hosts are
 * simply unregistered.  When the last controller goes away the char device
 * and reboot notifier are removed as well. */
static int gdth_release(struct Scsi_Host *shp)
{
    int hanum;
    gdth_ha_str *ha;

    TRACE2(("gdth_release()\n"));
    if (NUMDATA(shp)->busnum == 0) {
        hanum = NUMDATA(shp)->hanum;
        ha = HADATA(gdth_ctr_tab[hanum]);
        if (ha->sdev) {
            scsi_free_host_dev(ha->sdev);
            ha->sdev = NULL;
        }
        /* flush controller caches before tearing anything down */
        gdth_flush(hanum);

        if (shp->irq) {
            free_irq(shp->irq,ha);
        }
#ifndef __ia64__
        if (shp->dma_channel != 0xff) {
            free_dma(shp->dma_channel);
        }
#endif
#ifdef INT_COAL
        if (ha->coal_stat)
            pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys);
#endif
        if (ha->pscratch)
            pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                ha->pscratch, ha->scratch_phys);
        if (ha->pmsg)
            pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                ha->pmsg, ha->msg_phys);
        /* ccb_phys is only non-zero for EISA (mapped in gdth_detect) */
        if (ha->ccb_phys)
            pci_unmap_single(ha->pdev,ha->ccb_phys,
                             sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
        gdth_ctr_released++;
        TRACE2(("gdth_release(): HA %d of %d\n",
                gdth_ctr_released, gdth_ctr_count));

        /* last controller gone: remove the global ioctl device/notifier */
        if (gdth_ctr_released == gdth_ctr_count) {
#ifdef GDTH_STATISTICS
            del_timer(&gdth_timer);
#endif
            unregister_chrdev(major,"gdth");
            unregister_reboot_notifier(&gdth_notifier);
        }
    }

    scsi_unregister(shp);
    return 0;
}


/* Map an old-style (EISA/ISA/first-generation PCI) controller's subtype to a
 * human-readable model name.  Returns "" for anything it does not know. */
static const char *gdth_ctr_name(int hanum)
{
    gdth_ha_str *ha;

    TRACE2(("gdth_ctr_name()\n"));

    ha = HADATA(gdth_ctr_tab[hanum]);

    if (ha->type == GDT_EISA) {
        switch (ha->stype) {
          case GDT3_ID:
            return("GDT3000/3020");
          case GDT3A_ID:
            return("GDT3000A/3020A/3050A");
          case GDT3B_ID:
            return("GDT3000B/3010A");
        }
    } else if (ha->type == GDT_ISA) {
        return("GDT2000/2020");
    } else if (ha->type == GDT_PCI) {
        switch (ha->stype) {
          case PCI_DEVICE_ID_VORTEX_GDT60x0:
            return("GDT6000/6020/6050");
          case PCI_DEVICE_ID_VORTEX_GDT6000B:
            return("GDT6000B/6010");
        }
    }
    /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */

    return("");
}

/* SCSI mid-layer 'info' hook: return the controller's type string as
 * reported by the firmware's board info. */
static const char *gdth_info(struct Scsi_Host *shp)
{
    int hanum;
    gdth_ha_str *ha;

    TRACE2(("gdth_info()\n"));
    hanum = NUMDATA(shp)->hanum;
    ha = HADATA(gdth_ctr_tab[hanum]);

    return ((const char *)ha->binfo.type_string);
}

/* Error-handler bus reset.  Drops every pending command on the affected
 * channel, then either clears cluster reservations on all host drives (when
 * the virtual host-drive bus was hit) or resets the corresponding physical
 * SCSI bus via the raw service.  Always reports SUCCESS to the mid-layer. */
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
    int i, hanum;
    gdth_ha_str *ha;
    ulong flags;
    Scsi_Cmnd *cmnd;
    unchar b;

    TRACE2(("gdth_eh_bus_reset()\n"));

    hanum = NUMDATA(scp->device->host)->hanum;
    b = virt_ctr ? NUMDATA(scp->device->host)->busnum : scp->device->channel;
    ha = HADATA(gdth_ctr_tab[hanum]);

    /* clear command tab */
    spin_lock_irqsave(&ha->smp_lock, flags);
    for (i = 0; i < GDTH_MAXCMDS; ++i) {
        cmnd = ha->cmd_tab[i].cmnd;
        /* NOTE(review): cmnd->device is only dereferenced when the slot is
           not special -- assumes SPECIAL_SCP() covers UNUSED_CMND and the
           driver-internal marker commands; confirm against the header */
        if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (b == ha->virt_bus) {
        /* host drives */
        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (ha->hdr[i].present) {
                spin_lock_irqsave(&ha->smp_lock, flags);
                /* issue the firmware command in polled mode under the lock */
                gdth_polling = TRUE;
                while (gdth_test_busy(hanum))
                    gdth_delay(0);
                if (gdth_internal_cmd(hanum, CACHESERVICE,
                                      GDT_CLUST_RESET, i, 0, 0))
                    ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
                gdth_polling = FALSE;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
            }
        }
    } else {
        /* raw devices */
        spin_lock_irqsave(&ha->smp_lock, flags);
        for (i = 0; i < MAXID; ++i)
            ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
        gdth_polling = TRUE;
        while (gdth_test_busy(hanum))
            gdth_delay(0);
        gdth_internal_cmd(hanum, SCSIRAWSERVICE, GDT_RESET_BUS,
                          BUS_L2P(ha,b), 0, 0);
        gdth_polling = FALSE;
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }
    return SUCCESS;
}

/* BIOS geometry hook: report heads/sectors/cylinders for fdisk & friends.
 * Host drives with firmware-supplied mapping use it; everything else gets a
 * geometry derived from the raw capacity. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int
gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) 4896#else 4897static int gdth_bios_param(Disk *disk,kdev_t dev,int *ip) 4898#endif 4899{ 4900 unchar b, t; 4901 int hanum; 4902 gdth_ha_str *ha; 4903 struct scsi_device *sd; 4904 unsigned capacity; 4905 4906#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 4907 sd = sdev; 4908 capacity = cap; 4909#else 4910 sd = disk->device; 4911 capacity = disk->capacity; 4912#endif 4913 hanum = NUMDATA(sd->host)->hanum; 4914 b = virt_ctr ? NUMDATA(sd->host)->busnum : sd->channel; 4915 t = sd->id; 4916 TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", hanum, b, t)); 4917 ha = HADATA(gdth_ctr_tab[hanum]); 4918 4919 if (b != ha->virt_bus || ha->hdr[t].heads == 0) { 4920 /* raw device or host drive without mapping information */ 4921 TRACE2(("Evaluate mapping\n")); 4922 gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]); 4923 } else { 4924 ip[0] = ha->hdr[t].heads; 4925 ip[1] = ha->hdr[t].secs; 4926 ip[2] = capacity / ip[0] / ip[1]; 4927 } 4928 4929 TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n", 4930 ip[0],ip[1],ip[2])); 4931 return 0; 4932} 4933 4934 4935static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)) 4936{ 4937 int hanum; 4938 int priority; 4939 4940 TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0])); 4941 4942 scp->scsi_done = (void *)done; 4943 scp->SCp.have_data_in = 1; 4944 scp->SCp.phase = -1; 4945 scp->SCp.sent_command = -1; 4946 scp->SCp.Status = GDTH_MAP_NONE; 4947 scp->SCp.buffer = (struct scatterlist *)NULL; 4948 4949 hanum = NUMDATA(scp->device->host)->hanum; 4950#ifdef GDTH_STATISTICS 4951 ++act_ios; 4952#endif 4953 4954 priority = DEFAULT_PRI; 4955 if (scp->done == gdth_scsi_done) 4956 priority = scp->SCp.this_residual; 4957 else 4958 gdth_update_timeout(hanum, scp, scp->timeout_per_command * 6); 4959 4960 gdth_putq( hanum, scp, priority ); 4961 gdth_next( hanum ); 4962 return 0; 4963} 4964 4965 4966static int gdth_open(struct inode *inode, 
struct file *filep) 4967{ 4968 gdth_ha_str *ha; 4969 int i; 4970 4971 for (i = 0; i < gdth_ctr_count; i++) { 4972 ha = HADATA(gdth_ctr_tab[i]); 4973 if (!ha->sdev) 4974 ha->sdev = scsi_get_host_dev(gdth_ctr_tab[i]); 4975 } 4976 4977 TRACE(("gdth_open()\n")); 4978 return 0; 4979} 4980 4981static int gdth_close(struct inode *inode, struct file *filep) 4982{ 4983 TRACE(("gdth_close()\n")); 4984 return 0; 4985} 4986 4987static int ioc_event(void __user *arg) 4988{ 4989 gdth_ioctl_event evt; 4990 gdth_ha_str *ha; 4991 ulong flags; 4992 4993 if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)) || 4994 evt.ionode >= gdth_ctr_count) 4995 return -EFAULT; 4996 ha = HADATA(gdth_ctr_tab[evt.ionode]); 4997 4998 if (evt.erase == 0xff) { 4999 if (evt.event.event_source == ES_TEST) 5000 evt.event.event_data.size=sizeof(evt.event.event_data.eu.test); 5001 else if (evt.event.event_source == ES_DRIVER) 5002 evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver); 5003 else if (evt.event.event_source == ES_SYNC) 5004 evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync); 5005 else 5006 evt.event.event_data.size=sizeof(evt.event.event_data.eu.async); 5007 spin_lock_irqsave(&ha->smp_lock, flags); 5008 gdth_store_event(ha, evt.event.event_source, evt.event.event_idx, 5009 &evt.event.event_data); 5010 spin_unlock_irqrestore(&ha->smp_lock, flags); 5011 } else if (evt.erase == 0xfe) { 5012 gdth_clear_events(); 5013 } else if (evt.erase == 0) { 5014 evt.handle = gdth_read_event(ha, evt.handle, &evt.event); 5015 } else { 5016 gdth_readapp_event(ha, evt.erase, &evt.event); 5017 } 5018 if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event))) 5019 return -EFAULT; 5020 return 0; 5021} 5022 5023static int ioc_lockdrv(void __user *arg) 5024{ 5025 gdth_ioctl_lockdrv ldrv; 5026 unchar i, j; 5027 ulong flags; 5028 gdth_ha_str *ha; 5029 5030 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)) || 5031 ldrv.ionode >= gdth_ctr_count) 5032 return -EFAULT; 5033 ha = 
HADATA(gdth_ctr_tab[ldrv.ionode]); 5034 5035 for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) { 5036 j = ldrv.drives[i]; 5037 if (j >= MAX_HDRIVES || !ha->hdr[j].present) 5038 continue; 5039 if (ldrv.lock) { 5040 spin_lock_irqsave(&ha->smp_lock, flags); 5041 ha->hdr[j].lock = 1; 5042 spin_unlock_irqrestore(&ha->smp_lock, flags); 5043 gdth_wait_completion(ldrv.ionode, ha->bus_cnt, j); 5044 gdth_stop_timeout(ldrv.ionode, ha->bus_cnt, j); 5045 } else { 5046 spin_lock_irqsave(&ha->smp_lock, flags); 5047 ha->hdr[j].lock = 0; 5048 spin_unlock_irqrestore(&ha->smp_lock, flags); 5049 gdth_start_timeout(ldrv.ionode, ha->bus_cnt, j); 5050 gdth_next(ldrv.ionode); 5051 } 5052 } 5053 return 0; 5054} 5055 5056static int ioc_resetdrv(void __user *arg, char *cmnd) 5057{ 5058 gdth_ioctl_reset res; 5059 gdth_cmd_str cmd; 5060 int hanum; 5061 gdth_ha_str *ha; 5062 int rval; 5063 5064 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) || 5065 res.ionode >= gdth_ctr_count || res.number >= MAX_HDRIVES) 5066 return -EFAULT; 5067 hanum = res.ionode; 5068 ha = HADATA(gdth_ctr_tab[hanum]); 5069 5070 if (!ha->hdr[res.number].present) 5071 return 0; 5072 memset(&cmd, 0, sizeof(gdth_cmd_str)); 5073 cmd.Service = CACHESERVICE; 5074 cmd.OpCode = GDT_CLUST_RESET; 5075 if (ha->cache_feat & GDT_64BIT) 5076 cmd.u.cache64.DeviceNo = res.number; 5077 else 5078 cmd.u.cache.DeviceNo = res.number; 5079 5080 rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL); 5081 if (rval < 0) 5082 return rval; 5083 res.status = rval; 5084 5085 if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset))) 5086 return -EFAULT; 5087 return 0; 5088} 5089 5090static int ioc_general(void __user *arg, char *cmnd) 5091{ 5092 gdth_ioctl_general gen; 5093 char *buf = NULL; 5094 ulong64 paddr; 5095 int hanum; 5096 gdth_ha_str *ha; 5097 int rval; 5098 5099 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)) || 5100 gen.ionode >= gdth_ctr_count) 5101 return -EFAULT; 5102 hanum = gen.ionode; 5103 ha = 
HADATA(gdth_ctr_tab[hanum]); 5104 if (gen.data_len + gen.sense_len != 0) { 5105 if (!(buf = gdth_ioctl_alloc(hanum, gen.data_len + gen.sense_len, 5106 FALSE, &paddr))) 5107 return -EFAULT; 5108 if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general), 5109 gen.data_len + gen.sense_len)) { 5110 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr); 5111 return -EFAULT; 5112 } 5113 5114 if (gen.command.OpCode == GDT_IOCTL) { 5115 gen.command.u.ioctl.p_param = paddr; 5116 } else if (gen.command.Service == CACHESERVICE) { 5117 if (ha->cache_feat & GDT_64BIT) { 5118 /* copy elements from 32-bit IOCTL structure */ 5119 gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt; 5120 gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo; 5121 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo; 5122 /* addresses */ 5123 if (ha->cache_feat & SCATTER_GATHER) { 5124 gen.command.u.cache64.DestAddr = (ulong64)-1; 5125 gen.command.u.cache64.sg_canz = 1; 5126 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; 5127 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; 5128 gen.command.u.cache64.sg_lst[1].sg_len = 0; 5129 } else { 5130 gen.command.u.cache64.DestAddr = paddr; 5131 gen.command.u.cache64.sg_canz = 0; 5132 } 5133 } else { 5134 if (ha->cache_feat & SCATTER_GATHER) { 5135 gen.command.u.cache.DestAddr = 0xffffffff; 5136 gen.command.u.cache.sg_canz = 1; 5137 gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr; 5138 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; 5139 gen.command.u.cache.sg_lst[1].sg_len = 0; 5140 } else { 5141 gen.command.u.cache.DestAddr = paddr; 5142 gen.command.u.cache.sg_canz = 0; 5143 } 5144 } 5145 } else if (gen.command.Service == SCSIRAWSERVICE) { 5146 if (ha->raw_feat & GDT_64BIT) { 5147 /* copy elements from 32-bit IOCTL structure */ 5148 char cmd[16]; 5149 gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len; 5150 gen.command.u.raw64.bus = gen.command.u.raw.bus; 5151 gen.command.u.raw64.lun = 
gen.command.u.raw.lun; 5152 gen.command.u.raw64.target = gen.command.u.raw.target; 5153 memcpy(cmd, gen.command.u.raw.cmd, 16); 5154 memcpy(gen.command.u.raw64.cmd, cmd, 16); 5155 gen.command.u.raw64.clen = gen.command.u.raw.clen; 5156 gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen; 5157 gen.command.u.raw64.direction = gen.command.u.raw.direction; 5158 /* addresses */ 5159 if (ha->raw_feat & SCATTER_GATHER) { 5160 gen.command.u.raw64.sdata = (ulong64)-1; 5161 gen.command.u.raw64.sg_ranz = 1; 5162 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; 5163 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; 5164 gen.command.u.raw64.sg_lst[1].sg_len = 0; 5165 } else { 5166 gen.command.u.raw64.sdata = paddr; 5167 gen.command.u.raw64.sg_ranz = 0; 5168 } 5169 gen.command.u.raw64.sense_data = paddr + gen.data_len; 5170 } else { 5171 if (ha->raw_feat & SCATTER_GATHER) { 5172 gen.command.u.raw.sdata = 0xffffffff; 5173 gen.command.u.raw.sg_ranz = 1; 5174 gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr; 5175 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; 5176 gen.command.u.raw.sg_lst[1].sg_len = 0; 5177 } else { 5178 gen.command.u.raw.sdata = paddr; 5179 gen.command.u.raw.sg_ranz = 0; 5180 } 5181 gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len; 5182 } 5183 } else { 5184 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr); 5185 return -EFAULT; 5186 } 5187 } 5188 5189 rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); 5190 if (rval < 0) 5191 return rval; 5192 gen.status = rval; 5193 5194 if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, 5195 gen.data_len + gen.sense_len)) { 5196 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr); 5197 return -EFAULT; 5198 } 5199 if (copy_to_user(arg, &gen, 5200 sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) { 5201 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr); 5202 return -EFAULT; 5203 } 5204 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, 
paddr); 5205 return 0; 5206} 5207 5208static int ioc_hdrlist(void __user *arg, char *cmnd) 5209{ 5210 gdth_ioctl_rescan *rsc; 5211 gdth_cmd_str *cmd; 5212 gdth_ha_str *ha; 5213 unchar i; 5214 int hanum, rc = -ENOMEM; 5215 u32 cluster_type = 0; 5216 5217 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); 5218 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 5219 if (!rsc || !cmd) 5220 goto free_fail; 5221 5222 if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) || 5223 rsc->ionode >= gdth_ctr_count) { 5224 rc = -EFAULT; 5225 goto free_fail; 5226 } 5227 hanum = rsc->ionode; 5228 ha = HADATA(gdth_ctr_tab[hanum]); 5229 memset(cmd, 0, sizeof(gdth_cmd_str)); 5230 5231 for (i = 0; i < MAX_HDRIVES; ++i) { 5232 if (!ha->hdr[i].present) { 5233 rsc->hdr_list[i].bus = 0xff; 5234 continue; 5235 } 5236 rsc->hdr_list[i].bus = ha->virt_bus; 5237 rsc->hdr_list[i].target = i; 5238 rsc->hdr_list[i].lun = 0; 5239 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; 5240 if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) { 5241 cmd->Service = CACHESERVICE; 5242 cmd->OpCode = GDT_CLUST_INFO; 5243 if (ha->cache_feat & GDT_64BIT) 5244 cmd->u.cache64.DeviceNo = i; 5245 else 5246 cmd->u.cache.DeviceNo = i; 5247 if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK) 5248 rsc->hdr_list[i].cluster_type = cluster_type; 5249 } 5250 } 5251 5252 if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan))) 5253 rc = -EFAULT; 5254 else 5255 rc = 0; 5256 5257free_fail: 5258 kfree(rsc); 5259 kfree(cmd); 5260 return rc; 5261} 5262 5263static int ioc_rescan(void __user *arg, char *cmnd) 5264{ 5265 gdth_ioctl_rescan *rsc; 5266 gdth_cmd_str *cmd; 5267 ushort i, status, hdr_cnt; 5268 ulong32 info; 5269 int hanum, cyls, hds, secs; 5270 int rc = -ENOMEM; 5271 ulong flags; 5272 gdth_ha_str *ha; 5273 5274 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL); 5275 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 5276 if (!cmd || !rsc) 5277 goto free_fail; 5278 5279 if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) || 5280 rsc->ionode 
>= gdth_ctr_count) { 5281 rc = -EFAULT; 5282 goto free_fail; 5283 } 5284 hanum = rsc->ionode; 5285 ha = HADATA(gdth_ctr_tab[hanum]); 5286 memset(cmd, 0, sizeof(gdth_cmd_str)); 5287 5288 if (rsc->flag == 0) { 5289 /* old method: re-init. cache service */ 5290 cmd->Service = CACHESERVICE; 5291 if (ha->cache_feat & GDT_64BIT) { 5292 cmd->OpCode = GDT_X_INIT_HOST; 5293 cmd->u.cache64.DeviceNo = LINUX_OS; 5294 } else { 5295 cmd->OpCode = GDT_INIT; 5296 cmd->u.cache.DeviceNo = LINUX_OS; 5297 } 5298 5299 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 5300 i = 0; 5301 hdr_cnt = (status == S_OK ? (ushort)info : 0); 5302 } else { 5303 i = rsc->hdr_no; 5304 hdr_cnt = i + 1; 5305 } 5306 5307 for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) { 5308 cmd->Service = CACHESERVICE; 5309 cmd->OpCode = GDT_INFO; 5310 if (ha->cache_feat & GDT_64BIT) 5311 cmd->u.cache64.DeviceNo = i; 5312 else 5313 cmd->u.cache.DeviceNo = i; 5314 5315 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 5316 5317 spin_lock_irqsave(&ha->smp_lock, flags); 5318 rsc->hdr_list[i].bus = ha->virt_bus; 5319 rsc->hdr_list[i].target = i; 5320 rsc->hdr_list[i].lun = 0; 5321 if (status != S_OK) { 5322 ha->hdr[i].present = FALSE; 5323 } else { 5324 ha->hdr[i].present = TRUE; 5325 ha->hdr[i].size = info; 5326 /* evaluate mapping */ 5327 ha->hdr[i].size &= ~SECS32; 5328 gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs); 5329 ha->hdr[i].heads = hds; 5330 ha->hdr[i].secs = secs; 5331 /* round size */ 5332 ha->hdr[i].size = cyls * hds * secs; 5333 } 5334 spin_unlock_irqrestore(&ha->smp_lock, flags); 5335 if (status != S_OK) 5336 continue; 5337 5338 /* extended info, if GDT_64BIT, for drives > 2 TB */ 5339 /* but we need ha->info2, not yet stored in scp->SCp */ 5340 5341 /* devtype, cluster info, R/W attribs */ 5342 cmd->Service = CACHESERVICE; 5343 cmd->OpCode = GDT_DEVTYPE; 5344 if (ha->cache_feat & GDT_64BIT) 5345 cmd->u.cache64.DeviceNo = i; 5346 else 5347 cmd->u.cache.DeviceNo = i; 5348 5349 status = 
__gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 5350 5351 spin_lock_irqsave(&ha->smp_lock, flags); 5352 ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0); 5353 spin_unlock_irqrestore(&ha->smp_lock, flags); 5354 5355 cmd->Service = CACHESERVICE; 5356 cmd->OpCode = GDT_CLUST_INFO; 5357 if (ha->cache_feat & GDT_64BIT) 5358 cmd->u.cache64.DeviceNo = i; 5359 else 5360 cmd->u.cache.DeviceNo = i; 5361 5362 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 5363 5364 spin_lock_irqsave(&ha->smp_lock, flags); 5365 ha->hdr[i].cluster_type = 5366 ((status == S_OK && !shared_access) ? (ushort)info : 0); 5367 spin_unlock_irqrestore(&ha->smp_lock, flags); 5368 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type; 5369 5370 cmd->Service = CACHESERVICE; 5371 cmd->OpCode = GDT_RW_ATTRIBS; 5372 if (ha->cache_feat & GDT_64BIT) 5373 cmd->u.cache64.DeviceNo = i; 5374 else 5375 cmd->u.cache.DeviceNo = i; 5376 5377 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info); 5378 5379 spin_lock_irqsave(&ha->smp_lock, flags); 5380 ha->hdr[i].rw_attribs = (status == S_OK ? 
(ushort)info : 0); 5381 spin_unlock_irqrestore(&ha->smp_lock, flags); 5382 } 5383 5384 if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan))) 5385 rc = -EFAULT; 5386 else 5387 rc = 0; 5388 5389free_fail: 5390 kfree(rsc); 5391 kfree(cmd); 5392 return rc; 5393} 5394 5395static int gdth_ioctl(struct inode *inode, struct file *filep, 5396 unsigned int cmd, unsigned long arg) 5397{ 5398 gdth_ha_str *ha; 5399 Scsi_Cmnd *scp; 5400 ulong flags; 5401 char cmnd[MAX_COMMAND_SIZE]; 5402 void __user *argp = (void __user *)arg; 5403 5404 memset(cmnd, 0xff, 12); 5405 5406 TRACE(("gdth_ioctl() cmd 0x%x\n", cmd)); 5407 5408 switch (cmd) { 5409 case GDTIOCTL_CTRCNT: 5410 { 5411 int cnt = gdth_ctr_count; 5412 if (put_user(cnt, (int __user *)argp)) 5413 return -EFAULT; 5414 break; 5415 } 5416 5417 case GDTIOCTL_DRVERS: 5418 { 5419 int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION; 5420 if (put_user(ver, (int __user *)argp)) 5421 return -EFAULT; 5422 break; 5423 } 5424 5425 case GDTIOCTL_OSVERS: 5426 { 5427 gdth_ioctl_osvers osv; 5428 5429 osv.version = (unchar)(LINUX_VERSION_CODE >> 16); 5430 osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8); 5431 osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff); 5432 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers))) 5433 return -EFAULT; 5434 break; 5435 } 5436 5437 case GDTIOCTL_CTRTYPE: 5438 { 5439 gdth_ioctl_ctrtype ctrt; 5440 5441 if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) || 5442 ctrt.ionode >= gdth_ctr_count) 5443 return -EFAULT; 5444 ha = HADATA(gdth_ctr_tab[ctrt.ionode]); 5445 if (ha->type == GDT_ISA || ha->type == GDT_EISA) { 5446 ctrt.type = (unchar)((ha->stype>>20) - 0x10); 5447 } else { 5448 if (ha->type != GDT_PCIMPR) { 5449 ctrt.type = (unchar)((ha->stype<<4) + 6); 5450 } else { 5451 ctrt.type = 5452 (ha->oem_id == OEM_ID_INTEL ? 
0xfd : 0xfe); 5453 if (ha->stype >= 0x300) 5454 ctrt.ext_type = 0x6000 | ha->subdevice_id; 5455 else 5456 ctrt.ext_type = 0x6000 | ha->stype; 5457 } 5458 ctrt.device_id = ha->stype; 5459 ctrt.sub_device_id = ha->subdevice_id; 5460 } 5461 ctrt.info = ha->brd_phys; 5462 ctrt.oem_id = ha->oem_id; 5463 if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype))) 5464 return -EFAULT; 5465 break; 5466 } 5467 5468 case GDTIOCTL_GENERAL: 5469 return ioc_general(argp, cmnd); 5470 5471 case GDTIOCTL_EVENT: 5472 return ioc_event(argp); 5473 5474 case GDTIOCTL_LOCKDRV: 5475 return ioc_lockdrv(argp); 5476 5477 case GDTIOCTL_LOCKCHN: 5478 { 5479 gdth_ioctl_lockchn lchn; 5480 unchar i, j; 5481 5482 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) || 5483 lchn.ionode >= gdth_ctr_count) 5484 return -EFAULT; 5485 ha = HADATA(gdth_ctr_tab[lchn.ionode]); 5486 5487 i = lchn.channel; 5488 if (i < ha->bus_cnt) { 5489 if (lchn.lock) { 5490 spin_lock_irqsave(&ha->smp_lock, flags); 5491 ha->raw[i].lock = 1; 5492 spin_unlock_irqrestore(&ha->smp_lock, flags); 5493 for (j = 0; j < ha->tid_cnt; ++j) { 5494 gdth_wait_completion(lchn.ionode, i, j); 5495 gdth_stop_timeout(lchn.ionode, i, j); 5496 } 5497 } else { 5498 spin_lock_irqsave(&ha->smp_lock, flags); 5499 ha->raw[i].lock = 0; 5500 spin_unlock_irqrestore(&ha->smp_lock, flags); 5501 for (j = 0; j < ha->tid_cnt; ++j) { 5502 gdth_start_timeout(lchn.ionode, i, j); 5503 gdth_next(lchn.ionode); 5504 } 5505 } 5506 } 5507 break; 5508 } 5509 5510 case GDTIOCTL_RESCAN: 5511 return ioc_rescan(argp, cmnd); 5512 5513 case GDTIOCTL_HDRLIST: 5514 return ioc_hdrlist(argp, cmnd); 5515 5516 case GDTIOCTL_RESET_BUS: 5517 { 5518 gdth_ioctl_reset res; 5519 int hanum, rval; 5520 5521 if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) || 5522 res.ionode >= gdth_ctr_count) 5523 return -EFAULT; 5524 hanum = res.ionode; 5525 ha = HADATA(gdth_ctr_tab[hanum]); 5526 5527#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 5528 scp = kmalloc(sizeof(*scp), 
GFP_KERNEL); 5529 if (!scp) 5530 return -ENOMEM; 5531 memset(scp, 0, sizeof(*scp)); 5532 scp->device = ha->sdev; 5533 scp->cmd_len = 12; 5534 scp->use_sg = 0; 5535 scp->device->channel = virt_ctr ? 0 : res.number; 5536 rval = gdth_eh_bus_reset(scp); 5537 res.status = (rval == SUCCESS ? S_OK : S_GENERR); 5538 kfree(scp); 5539#else 5540 scp = scsi_allocate_device(ha->sdev, 1, FALSE); 5541 if (!scp) 5542 return -ENOMEM; 5543 scp->cmd_len = 12; 5544 scp->use_sg = 0; 5545 scp->channel = virt_ctr ? 0 : res.number; 5546 rval = gdth_eh_bus_reset(scp); 5547 res.status = (rval == SUCCESS ? S_OK : S_GENERR); 5548 scsi_release_command(scp); 5549#endif 5550 if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset))) 5551 return -EFAULT; 5552 break; 5553 } 5554 5555 case GDTIOCTL_RESET_DRV: 5556 return ioc_resetdrv(argp, cmnd); 5557 5558 default: 5559 break; 5560 } 5561 return 0; 5562} 5563 5564 5565/* flush routine */ 5566static void gdth_flush(int hanum) 5567{ 5568 int i; 5569 gdth_ha_str *ha; 5570 gdth_cmd_str gdtcmd; 5571 char cmnd[MAX_COMMAND_SIZE]; 5572 memset(cmnd, 0xff, MAX_COMMAND_SIZE); 5573 5574 TRACE2(("gdth_flush() hanum %d\n",hanum)); 5575 ha = HADATA(gdth_ctr_tab[hanum]); 5576 5577 for (i = 0; i < MAX_HDRIVES; ++i) { 5578 if (ha->hdr[i].present) { 5579 gdtcmd.BoardNode = LOCALBOARD; 5580 gdtcmd.Service = CACHESERVICE; 5581 gdtcmd.OpCode = GDT_FLUSH; 5582 if (ha->cache_feat & GDT_64BIT) { 5583 gdtcmd.u.cache64.DeviceNo = i; 5584 gdtcmd.u.cache64.BlockNo = 1; 5585 gdtcmd.u.cache64.sg_canz = 0; 5586 } else { 5587 gdtcmd.u.cache.DeviceNo = i; 5588 gdtcmd.u.cache.BlockNo = 1; 5589 gdtcmd.u.cache.sg_canz = 0; 5590 } 5591 TRACE2(("gdth_flush(): flush ha %d drive %d\n", hanum, i)); 5592 5593 gdth_execute(gdth_ctr_tab[hanum], &gdtcmd, cmnd, 30, NULL); 5594 } 5595 } 5596} 5597 5598/* shutdown routine */ 5599static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) 5600{ 5601 int hanum; 5602#ifndef __alpha__ 5603 gdth_cmd_str gdtcmd; 5604 char 
cmnd[MAX_COMMAND_SIZE]; 5605#endif 5606 5607 if (notifier_disabled) 5608 return NOTIFY_OK; 5609 5610 TRACE2(("gdth_halt() event %d\n",(int)event)); 5611 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 5612 return NOTIFY_DONE; 5613 5614 notifier_disabled = 1; 5615 printk("GDT-HA: Flushing all host drives .. "); 5616 for (hanum = 0; hanum < gdth_ctr_count; ++hanum) { 5617 gdth_flush(hanum); 5618 5619#ifndef __alpha__ 5620 /* controller reset */ 5621 memset(cmnd, 0xff, MAX_COMMAND_SIZE); 5622 gdtcmd.BoardNode = LOCALBOARD; 5623 gdtcmd.Service = CACHESERVICE; 5624 gdtcmd.OpCode = GDT_RESET; 5625 TRACE2(("gdth_halt(): reset controller %d\n", hanum)); 5626 gdth_execute(gdth_ctr_tab[hanum], &gdtcmd, cmnd, 10, NULL); 5627#endif 5628 } 5629 printk("Done.\n"); 5630 5631#ifdef GDTH_STATISTICS 5632 del_timer(&gdth_timer); 5633#endif 5634 return NOTIFY_OK; 5635} 5636 5637#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 5638/* configure lun */ 5639static int gdth_slave_configure(struct scsi_device *sdev) 5640{ 5641 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 5642 sdev->skip_ms_page_3f = 1; 5643 sdev->skip_ms_page_8 = 1; 5644 return 0; 5645} 5646#endif 5647 5648#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 5649static struct scsi_host_template driver_template = { 5650#else 5651static Scsi_Host_Template driver_template = { 5652#endif 5653 .proc_name = "gdth", 5654 .proc_info = gdth_proc_info, 5655 .name = "GDT SCSI Disk Array Controller", 5656 .detect = gdth_detect, 5657 .release = gdth_release, 5658 .info = gdth_info, 5659 .queuecommand = gdth_queuecommand, 5660 .eh_bus_reset_handler = gdth_eh_bus_reset, 5661 .bios_param = gdth_bios_param, 5662 .can_queue = GDTH_MAXCMDS, 5663#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) 5664 .slave_configure = gdth_slave_configure, 5665#endif 5666 .this_id = -1, 5667 .sg_tablesize = GDTH_MAXSG, 5668 .cmd_per_lun = GDTH_MAXC_P_L, 5669 .unchecked_isa_dma = 1, 5670 .use_clustering = ENABLE_CLUSTERING, 
5671#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) 5672 .use_new_eh_code = 1, 5673#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) 5674 .highmem_io = 1, 5675#endif 5676#endif 5677}; 5678 5679#include "scsi_module.c" 5680#ifndef MODULE 5681__setup("gdth=", option_setup); 5682#endif 5683