nv_sata.c revision 10336:d12a11a8c2b8
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30 * based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
36 * and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device.  mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible.  Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
48 * be supported but changes to these states are apparently are ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner than later,
51 * that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required.  However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, that power
56 * can be removed to the device, and the driver cannot rely on BIOS to
57 * have reset any state.  For the time being, there is no known
58 * non-default configurations that need to be programmed.  This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team.  If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66#include <sys/scsi/scsi.h>
67#include <sys/pci.h>
68#include <sys/byteorder.h>
69#include <sys/sunddi.h>
70#include <sys/sata/sata_hba.h>
71#ifdef SGPIO_SUPPORT
72#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73#include <sys/devctl.h>
74#include <sys/sdt.h>
75#endif
76#include <sys/sata/adapters/nv_sata/nv_sata.h>
77#include <sys/disp.h>
78#include <sys/note.h>
79#include <sys/promif.h>
80
81
82/*
83 * Function prototypes for driver entry points
84 */
85static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87static int nv_quiesce(dev_info_t *dip);
88static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89    void *arg, void **result);
90
91/*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102/*
103 * Local function prototypes
104 */
105static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108#ifdef NV_MSI_SUPPORTED
109static int nv_add_msi_intrs(nv_ctl_t *nvc);
110#endif
111static void nv_rem_intrs(nv_ctl_t *nvc);
112static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113static int nv_start_nodata(nv_port_t *nvp, int slot);
114static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115static int nv_start_pio_in(nv_port_t *nvp, int slot);
116static int nv_start_pio_out(nv_port_t *nvp, int slot);
117static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121static int nv_start_dma(nv_port_t *nvp, int slot);
122static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124static void nv_uninit_ctl(nv_ctl_t *nvc);
125static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127static void nv_uninit_port(nv_port_t *nvp);
128static int nv_init_port(nv_port_t *nvp);
129static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131#ifdef NCQ
132static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133#endif
134static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136    int state);
137static boolean_t nv_check_link(uint32_t sstatus);
138static void nv_common_reg_init(nv_ctl_t *nvc);
139static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
140static void nv_reset(nv_port_t *nvp);
141static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
142static void nv_timeout(void *);
143static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
144static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
145static void nv_read_signature(nv_port_t *nvp);
146static void mcp5x_set_intr(nv_port_t *nvp, int flag);
147static void ck804_set_intr(nv_port_t *nvp, int flag);
148static void nv_resume(nv_port_t *nvp);
149static void nv_suspend(nv_port_t *nvp);
150static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
151static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
152static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153    sata_pkt_t *spkt);
154static void nv_report_add_remove(nv_port_t *nvp, int flags);
155static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157    uchar_t failure_onbits2, uchar_t failure_offbits2,
158    uchar_t failure_onbits3, uchar_t failure_offbits3,
159    uint_t timeout_usec, int type_wait);
160static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161    uint_t timeout_usec, int type_wait);
162static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163
164#ifdef SGPIO_SUPPORT
165static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
166static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
167static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
168    cred_t *credp, int *rvalp);
169
170static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
171static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
172    uint32_t *cbpp);
173static int nv_sgp_init(nv_ctl_t *nvc);
174static void nv_sgp_reset(nv_ctl_t *nvc);
175static int nv_sgp_init_cmd(nv_ctl_t *nvc);
176static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
177static int nv_sgp_csr_read(nv_ctl_t *nvc);
178static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
179static int nv_sgp_write_data(nv_ctl_t *nvc);
180static void nv_sgp_activity_led_ctl(void *arg);
181static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
182static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
183static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
184static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
185static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
186static void nv_sgp_cleanup(nv_ctl_t *nvc);
187#endif
188
189
190/*
191 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
192 * Verify if needed if ported to other ISA.
193 */
194static ddi_dma_attr_t buffer_dma_attr = {
195	DMA_ATTR_V0,		/* dma_attr_version */
196	0,			/* dma_attr_addr_lo: lowest bus address */
197	0xffffffffull,		/* dma_attr_addr_hi: */
198	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
199	4,			/* dma_attr_align */
200	1,			/* dma_attr_burstsizes. */
201	1,			/* dma_attr_minxfer */
202	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
203	0xffffffffull,		/* dma_attr_seg */
204	NV_DMA_NSEGS,		/* dma_attr_sgllen */
205	512,			/* dma_attr_granular */
206	0,			/* dma_attr_flags */
207};
208static ddi_dma_attr_t buffer_dma_40bit_attr = {
209	DMA_ATTR_V0,		/* dma_attr_version */
210	0,			/* dma_attr_addr_lo: lowest bus address */
211	0xffffffffffull,	/* dma_attr_addr_hi: */
212	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
213	4,			/* dma_attr_align */
214	1,			/* dma_attr_burstsizes. */
215	1,			/* dma_attr_minxfer */
216	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
217	0xffffffffull,		/* dma_attr_seg */
218	NV_DMA_NSEGS,		/* dma_attr_sgllen */
219	512,			/* dma_attr_granular */
220	0,			/* dma_attr_flags */
221};
222
223
224/*
225 * DMA attributes for PRD tables
226 */
227ddi_dma_attr_t nv_prd_dma_attr = {
228	DMA_ATTR_V0,		/* dma_attr_version */
229	0,			/* dma_attr_addr_lo */
230	0xffffffffull,		/* dma_attr_addr_hi */
231	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
232	4,			/* dma_attr_align */
233	1,			/* dma_attr_burstsizes */
234	1,			/* dma_attr_minxfer */
235	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
236	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
237	1,			/* dma_attr_sgllen */
238	1,			/* dma_attr_granular */
239	0			/* dma_attr_flags */
240};
241
242/*
243 * Device access attributes
244 */
245static ddi_device_acc_attr_t accattr = {
246    DDI_DEVICE_ATTR_V0,
247    DDI_STRUCTURE_LE_ACC,
248    DDI_STRICTORDER_ACC
249};
250
251
252#ifdef SGPIO_SUPPORT
253static struct cb_ops nv_cb_ops = {
254	nv_open,		/* open */
255	nv_close,		/* close */
256	nodev,			/* strategy (block) */
257	nodev,			/* print (block) */
258	nodev,			/* dump (block) */
259	nodev,			/* read */
260	nodev,			/* write */
261	nv_ioctl,		/* ioctl */
262	nodev,			/* devmap */
263	nodev,			/* mmap */
264	nodev,			/* segmap */
265	nochpoll,		/* chpoll */
266	ddi_prop_op,		/* prop_op */
267	NULL,			/* streams */
268	D_NEW | D_MP |
269	D_64BIT | D_HOTPLUG,	/* flags */
270	CB_REV			/* rev */
271};
272#endif  /* SGPIO_SUPPORT */
273
274
275static struct dev_ops nv_dev_ops = {
276	DEVO_REV,		/* devo_rev */
277	0,			/* refcnt  */
278	nv_getinfo,		/* info */
279	nulldev,		/* identify */
280	nulldev,		/* probe */
281	nv_attach,		/* attach */
282	nv_detach,		/* detach */
283	nodev,			/* no reset */
284#ifdef SGPIO_SUPPORT
285	&nv_cb_ops,		/* driver operations */
286#else
287	(struct cb_ops *)0,	/* driver operations */
288#endif
289	NULL,			/* bus operations */
290	NULL,			/* power */
291	nv_quiesce		/* quiesce */
292};
293
294
295/*
296 * Request Sense CDB for ATAPI
297 */
298static const uint8_t nv_rqsense_cdb[16] = {
299	SCMD_REQUEST_SENSE,
300	0,
301	0,
302	0,
303	SATA_ATAPI_MIN_RQSENSE_LEN,
304	0,
305	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
306};
307
308
309static sata_tran_hotplug_ops_t nv_hotplug_ops;
310
311extern struct mod_ops mod_driverops;
312
313static  struct modldrv modldrv = {
314	&mod_driverops,	/* driverops */
315	"Nvidia ck804/mcp51/mcp55 HBA",
316	&nv_dev_ops,	/* driver ops */
317};
318
319static  struct modlinkage modlinkage = {
320	MODREV_1,
321	&modldrv,
322	NULL
323};
324
325
326/*
327 * wait between checks of reg status
328 */
329int nv_usec_delay = NV_WAIT_REG_CHECK;
330
331/*
332 * The following is needed for nv_vcmn_err()
333 */
334static kmutex_t nv_log_mutex; /* protects nv_log_buf */
335static char nv_log_buf[NV_STRING_512];
336int nv_debug_flags = NVDBG_ALWAYS;
337int nv_log_to_console = B_FALSE;
338
339int nv_log_delay = 0;
340int nv_prom_print = B_FALSE;
341
342/*
343 * for debugging
344 */
345#ifdef DEBUG
346int ncq_commands = 0;
347int non_ncq_commands = 0;
348#endif
349
350/*
351 * Opaque state pointer to be initialized by ddi_soft_state_init()
352 */
353static void *nv_statep	= NULL;
354
355/* We still have problems in 40-bit DMA support, so disable it by default */
356int nv_sata_40bit_dma = B_FALSE;
357
358static sata_tran_hotplug_ops_t nv_hotplug_ops = {
359	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
360	nv_sata_activate,	/* activate port. cfgadm -c connect */
361	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
362};
363
364
365/*
366 *  nv module initialization
367 */
368int
369_init(void)
370{
371	int	error;
372
373	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
374
375	if (error != 0) {
376
377		return (error);
378	}
379
380	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
381
382	if ((error = sata_hba_init(&modlinkage)) != 0) {
383		ddi_soft_state_fini(&nv_statep);
384		mutex_destroy(&nv_log_mutex);
385
386		return (error);
387	}
388
389	error = mod_install(&modlinkage);
390	if (error != 0) {
391		sata_hba_fini(&modlinkage);
392		ddi_soft_state_fini(&nv_statep);
393		mutex_destroy(&nv_log_mutex);
394
395		return (error);
396	}
397
398	return (error);
399}
400
401
402/*
403 * nv module uninitialize
404 */
405int
406_fini(void)
407{
408	int	error;
409
410	error = mod_remove(&modlinkage);
411
412	if (error != 0) {
413		return (error);
414	}
415
416	/*
417	 * remove the resources allocated in _init()
418	 */
419	mutex_destroy(&nv_log_mutex);
420	sata_hba_fini(&modlinkage);
421	ddi_soft_state_fini(&nv_statep);
422
423	return (error);
424}
425
426
427/*
428 * nv _info entry point
429 */
430int
431_info(struct modinfo *modinfop)
432{
433	return (mod_info(&modlinkage, modinfop));
434}
435
436
437/*
438 * these wrappers for ddi_{get,put}8 are for observability
439 * with dtrace
440 */
441#ifdef DEBUG
442
443static void
444nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
445{
446	ddi_put8(handle, dev_addr, value);
447}
448
449static void
450nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
451{
452	ddi_put32(handle, dev_addr, value);
453}
454
455static uint32_t
456nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
457{
458	return (ddi_get32(handle, dev_addr));
459}
460
461static void
462nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
463{
464	ddi_put16(handle, dev_addr, value);
465}
466
467static uint16_t
468nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
469{
470	return (ddi_get16(handle, dev_addr));
471}
472
473static uint8_t
474nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
475{
476	return (ddi_get8(handle, dev_addr));
477}
478
479#else
480
481#define	nv_put8 ddi_put8
482#define	nv_put32 ddi_put32
483#define	nv_get32 ddi_get32
484#define	nv_put16 ddi_put16
485#define	nv_get16 ddi_get16
486#define	nv_get8 ddi_get8
487
488#endif
489
490
491/*
492 * Driver attach
493 */
494static int
495nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
496{
497	int status, attach_state, intr_types, bar, i, command;
498	int inst = ddi_get_instance(dip);
499	ddi_acc_handle_t pci_conf_handle;
500	nv_ctl_t *nvc;
501	uint8_t subclass;
502	uint32_t reg32;
503#ifdef SGPIO_SUPPORT
504	pci_regspec_t *regs;
505	int rlen;
506#endif
507
508	switch (cmd) {
509
510	case DDI_ATTACH:
511
512		NVLOG((NVDBG_INIT, NULL, NULL,
513		    "nv_attach(): DDI_ATTACH inst %d", inst));
514
515		attach_state = ATTACH_PROGRESS_NONE;
516
517		status = ddi_soft_state_zalloc(nv_statep, inst);
518
519		if (status != DDI_SUCCESS) {
520			break;
521		}
522
523		nvc = ddi_get_soft_state(nv_statep, inst);
524
525		nvc->nvc_dip = dip;
526
527		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
528
529		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
530			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
531			    PCI_CONF_REVID);
532			NVLOG((NVDBG_INIT, NULL, NULL,
533			    "inst %d: silicon revid is %x nv_debug_flags=%x",
534			    inst, nvc->nvc_revid, nv_debug_flags));
535		} else {
536			break;
537		}
538
539		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
540
541		/*
542		 * Set the PCI command register: enable IO/MEM/Master.
543		 */
544		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
545		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
546		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
547
548		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
549
550		if (subclass & PCI_MASS_RAID) {
551			cmn_err(CE_WARN,
552			    "attach failed: RAID mode not supported");
553			break;
554		}
555
556		/*
557		 * the 6 bars of the controller are:
558		 * 0: port 0 task file
559		 * 1: port 0 status
560		 * 2: port 1 task file
561		 * 3: port 1 status
562		 * 4: bus master for both ports
563		 * 5: extended registers for SATA features
564		 */
565		for (bar = 0; bar < 6; bar++) {
566			status = ddi_regs_map_setup(dip, bar + 1,
567			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
568			    &nvc->nvc_bar_hdl[bar]);
569
570			if (status != DDI_SUCCESS) {
571				NVLOG((NVDBG_INIT, nvc, NULL,
572				    "ddi_regs_map_setup failure for bar"
573				    " %d status = %d", bar, status));
574				break;
575			}
576		}
577
578		attach_state |= ATTACH_PROGRESS_BARS;
579
580		/*
581		 * initialize controller and driver core
582		 */
583		status = nv_init_ctl(nvc, pci_conf_handle);
584
585		if (status == NV_FAILURE) {
586			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
587
588			break;
589		}
590
591		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
592
593		/*
594		 * initialize mutexes
595		 */
596		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
597		    DDI_INTR_PRI(nvc->nvc_intr_pri));
598
599		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
600
601		/*
602		 * get supported interrupt types
603		 */
604		if (ddi_intr_get_supported_types(dip, &intr_types) !=
605		    DDI_SUCCESS) {
606			nv_cmn_err(CE_WARN, nvc, NULL,
607			    "!ddi_intr_get_supported_types failed");
608			NVLOG((NVDBG_INIT, nvc, NULL,
609			    "interrupt supported types failed"));
610
611			break;
612		}
613
614		NVLOG((NVDBG_INIT, nvc, NULL,
615		    "ddi_intr_get_supported_types() returned: 0x%x",
616		    intr_types));
617
618#ifdef NV_MSI_SUPPORTED
619		if (intr_types & DDI_INTR_TYPE_MSI) {
620			NVLOG((NVDBG_INIT, nvc, NULL,
621			    "using MSI interrupt type"));
622
623			/*
624			 * Try MSI first, but fall back to legacy if MSI
625			 * attach fails
626			 */
627			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
628				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
629				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
630				NVLOG((NVDBG_INIT, nvc, NULL,
631				    "MSI interrupt setup done"));
632			} else {
633				nv_cmn_err(CE_CONT, nvc, NULL,
634				    "!MSI registration failed "
635				    "will try Legacy interrupts");
636			}
637		}
638#endif
639
640		/*
641		 * Either the MSI interrupt setup has failed or only
642		 * the fixed interrupts are available on the system.
643		 */
644		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
645		    (intr_types & DDI_INTR_TYPE_FIXED)) {
646
647			NVLOG((NVDBG_INIT, nvc, NULL,
648			    "using Legacy interrupt type"));
649
650			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
651				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
652				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
653				NVLOG((NVDBG_INIT, nvc, NULL,
654				    "Legacy interrupt setup done"));
655			} else {
656				nv_cmn_err(CE_WARN, nvc, NULL,
657				    "!legacy interrupt setup failed");
658				NVLOG((NVDBG_INIT, nvc, NULL,
659				    "legacy interrupt setup failed"));
660				break;
661			}
662		}
663
664		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
665			NVLOG((NVDBG_INIT, nvc, NULL,
666			    "no interrupts registered"));
667			break;
668		}
669
670#ifdef SGPIO_SUPPORT
671		/*
672		 * save off the controller number
673		 */
674		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
675		    "reg", (caddr_t)&regs, &rlen);
676		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
677		kmem_free(regs, rlen);
678
679		/*
680		 * initialize SGPIO
681		 */
682		nv_sgp_led_init(nvc, pci_conf_handle);
683#endif	/* SGPIO_SUPPORT */
684
685		/*
686		 * attach to sata module
687		 */
688		if (sata_hba_attach(nvc->nvc_dip,
689		    &nvc->nvc_sata_hba_tran,
690		    DDI_ATTACH) != DDI_SUCCESS) {
691			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
692
693			break;
694		}
695
696		pci_config_teardown(&pci_conf_handle);
697
698		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
699
700		return (DDI_SUCCESS);
701
702	case DDI_RESUME:
703
704		nvc = ddi_get_soft_state(nv_statep, inst);
705
706		NVLOG((NVDBG_INIT, nvc, NULL,
707		    "nv_attach(): DDI_RESUME inst %d", inst));
708
709		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
710			return (DDI_FAILURE);
711		}
712
713		/*
714		 * Set the PCI command register: enable IO/MEM/Master.
715		 */
716		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
717		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
718		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
719
720		/*
721		 * Need to set bit 2 to 1 at config offset 0x50
722		 * to enable access to the bar5 registers.
723		 */
724		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
725
726		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
727			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
728			    reg32 | NV_BAR5_SPACE_EN);
729		}
730
731		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
732
733		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
734			nv_resume(&(nvc->nvc_port[i]));
735		}
736
737		pci_config_teardown(&pci_conf_handle);
738
739		return (DDI_SUCCESS);
740
741	default:
742		return (DDI_FAILURE);
743	}
744
745
746	/*
747	 * DDI_ATTACH failure path starts here
748	 */
749
750	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
751		nv_rem_intrs(nvc);
752	}
753
754	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
755		/*
756		 * Remove timers
757		 */
758		int port = 0;
759		nv_port_t *nvp;
760
761		for (; port < NV_MAX_PORTS(nvc); port++) {
762			nvp = &(nvc->nvc_port[port]);
763			if (nvp->nvp_timeout_id != 0) {
764				(void) untimeout(nvp->nvp_timeout_id);
765			}
766		}
767	}
768
769	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
770		mutex_destroy(&nvc->nvc_mutex);
771	}
772
773	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
774		nv_uninit_ctl(nvc);
775	}
776
777	if (attach_state & ATTACH_PROGRESS_BARS) {
778		while (--bar >= 0) {
779			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
780		}
781	}
782
783	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
784		ddi_soft_state_free(nv_statep, inst);
785	}
786
787	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
788		pci_config_teardown(&pci_conf_handle);
789	}
790
791	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
792
793	return (DDI_FAILURE);
794}
795
796
797static int
798nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
799{
800	int i, port, inst = ddi_get_instance(dip);
801	nv_ctl_t *nvc;
802	nv_port_t *nvp;
803
804	nvc = ddi_get_soft_state(nv_statep, inst);
805
806	switch (cmd) {
807
808	case DDI_DETACH:
809
810		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
811
812		/*
813		 * Remove interrupts
814		 */
815		nv_rem_intrs(nvc);
816
817		/*
818		 * Remove timers
819		 */
820		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
821			nvp = &(nvc->nvc_port[port]);
822			if (nvp->nvp_timeout_id != 0) {
823				(void) untimeout(nvp->nvp_timeout_id);
824			}
825		}
826
827		/*
828		 * Remove maps
829		 */
830		for (i = 0; i < 6; i++) {
831			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
832		}
833
834		/*
835		 * Destroy mutexes
836		 */
837		mutex_destroy(&nvc->nvc_mutex);
838
839		/*
840		 * Uninitialize the controller
841		 */
842		nv_uninit_ctl(nvc);
843
844#ifdef SGPIO_SUPPORT
845		/*
846		 * release SGPIO resources
847		 */
848		nv_sgp_cleanup(nvc);
849#endif
850
851		/*
852		 * unregister from the sata module
853		 */
854		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
855
856		/*
857		 * Free soft state
858		 */
859		ddi_soft_state_free(nv_statep, inst);
860
861		return (DDI_SUCCESS);
862
863	case DDI_SUSPEND:
864
865		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
866
867		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
868			nv_suspend(&(nvc->nvc_port[i]));
869		}
870
871		nvc->nvc_state |= NV_CTRL_SUSPEND;
872
873		return (DDI_SUCCESS);
874
875	default:
876		return (DDI_FAILURE);
877	}
878}
879
880
881/*ARGSUSED*/
882static int
883nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
884{
885	nv_ctl_t *nvc;
886	int instance;
887	dev_t dev;
888
889	dev = (dev_t)arg;
890	instance = getminor(dev);
891
892	switch (infocmd) {
893	case DDI_INFO_DEVT2DEVINFO:
894		nvc = ddi_get_soft_state(nv_statep,  instance);
895		if (nvc != NULL) {
896			*result = nvc->nvc_dip;
897			return (DDI_SUCCESS);
898		} else {
899			*result = NULL;
900			return (DDI_FAILURE);
901		}
902	case DDI_INFO_DEVT2INSTANCE:
903		*(int *)result = instance;
904		break;
905	default:
906		break;
907	}
908	return (DDI_SUCCESS);
909}
910
911
912#ifdef SGPIO_SUPPORT
913/* ARGSUSED */
914static int
915nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
916{
917	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
918
919	if (nvc == NULL) {
920		return (ENXIO);
921	}
922
923	return (0);
924}
925
926
927/* ARGSUSED */
928static int
929nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
930{
931	return (0);
932}
933
934
935/* ARGSUSED */
936static int
937nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
938{
939	nv_ctl_t *nvc;
940	int inst;
941	int status;
942	int ctlr, port;
943	int drive;
944	uint8_t curr_led;
945	struct dc_led_ctl led;
946
947	inst = getminor(dev);
948	if (inst == -1) {
949		return (EBADF);
950	}
951
952	nvc = ddi_get_soft_state(nv_statep, inst);
953	if (nvc == NULL) {
954		return (EBADF);
955	}
956
957	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
958		return (EIO);
959	}
960
961	switch (cmd) {
962	case DEVCTL_SET_LED:
963		status = ddi_copyin((void *)arg, &led,
964		    sizeof (struct dc_led_ctl), mode);
965		if (status != 0)
966			return (EFAULT);
967
968		/*
969		 * Since only the first two controller currently support
970		 * SGPIO (as per NVIDIA docs), this code will as well.
971		 * Note that this validate the port value within led_state
972		 * as well.
973		 */
974
975		ctlr = SGP_DRV_TO_CTLR(led.led_number);
976		if ((ctlr != 0) && (ctlr != 1))
977			return (ENXIO);
978
979		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
980		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
981			return (EINVAL);
982		}
983
984		drive = led.led_number;
985
986		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
987		    (led.led_state == DCL_STATE_OFF)) {
988
989			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
990				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
991			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
992				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
993			} else {
994				return (ENXIO);
995			}
996
997			port = SGP_DRV_TO_PORT(led.led_number);
998			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
999		}
1000
1001		if (led.led_ctl_active == DCL_CNTRL_ON) {
1002			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1003				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1004			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1005				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1006			} else {
1007				return (ENXIO);
1008			}
1009
1010			port = SGP_DRV_TO_PORT(led.led_number);
1011			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1012		}
1013
1014		break;
1015
1016	case DEVCTL_GET_LED:
1017		status = ddi_copyin((void *)arg, &led,
1018		    sizeof (struct dc_led_ctl), mode);
1019		if (status != 0)
1020			return (EFAULT);
1021
1022		/*
1023		 * Since only the first two controller currently support
1024		 * SGPIO (as per NVIDIA docs), this code will as well.
1025		 * Note that this validate the port value within led_state
1026		 * as well.
1027		 */
1028
1029		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1030		if ((ctlr != 0) && (ctlr != 1))
1031			return (ENXIO);
1032
1033		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1034		    led.led_number);
1035
1036		port = SGP_DRV_TO_PORT(led.led_number);
1037		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1038			led.led_ctl_active = DCL_CNTRL_ON;
1039
1040			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1041				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1042					led.led_state = DCL_STATE_OFF;
1043				else
1044					led.led_state = DCL_STATE_ON;
1045			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1046				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1047					led.led_state = DCL_STATE_OFF;
1048				else
1049					led.led_state = DCL_STATE_ON;
1050			} else {
1051				return (ENXIO);
1052			}
1053		} else {
1054			led.led_ctl_active = DCL_CNTRL_OFF;
1055			/*
1056			 * Not really off, but never set and no constant for
1057			 * tri-state
1058			 */
1059			led.led_state = DCL_STATE_OFF;
1060		}
1061
1062		status = ddi_copyout(&led, (void *)arg,
1063		    sizeof (struct dc_led_ctl), mode);
1064		if (status != 0)
1065			return (EFAULT);
1066
1067		break;
1068
1069	case DEVCTL_NUM_LEDS:
1070		led.led_number = SGPIO_DRV_CNT_VALUE;
1071		led.led_ctl_active = 1;
1072		led.led_type = 3;
1073
1074		/*
1075		 * According to documentation, NVIDIA SGPIO is supposed to
1076		 * support blinking, but it does not seem to work in practice.
1077		 */
1078		led.led_state = DCL_STATE_ON;
1079
1080		status = ddi_copyout(&led, (void *)arg,
1081		    sizeof (struct dc_led_ctl), mode);
1082		if (status != 0)
1083			return (EFAULT);
1084
1085		break;
1086
1087	default:
1088		return (EINVAL);
1089	}
1090
1091	return (0);
1092}
1093#endif	/* SGPIO_SUPPORT */
1094
1095
1096/*
1097 * Called by sata module to probe a port.  Port and device state
1098 * are not changed here... only reported back to the sata module.
1099 *
1100 * If probe confirms a device is present for the first time, it will
1101 * initiate a device reset, then probe will be called again and the
1102 * signature will be check.  If the signature is valid, data structures
1103 * will be initialized.
1104 */
1105static int
1106nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1107{
1108	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1109	uint8_t cport = sd->satadev_addr.cport;
1110	uint8_t pmport = sd->satadev_addr.pmport;
1111	uint8_t qual = sd->satadev_addr.qual;
1112	clock_t nv_lbolt = ddi_get_lbolt();
1113	nv_port_t *nvp;
1114
1115	if (cport >= NV_MAX_PORTS(nvc)) {
1116		sd->satadev_type = SATA_DTYPE_NONE;
1117		sd->satadev_state = SATA_STATE_UNKNOWN;
1118
1119		return (SATA_FAILURE);
1120	}
1121
1122	ASSERT(nvc->nvc_port != NULL);
1123	nvp = &(nvc->nvc_port[cport]);
1124	ASSERT(nvp != NULL);
1125
1126	NVLOG((NVDBG_PROBE, nvc, nvp,
1127	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1128	    "qual: 0x%x", cport, pmport, qual));
1129
1130	mutex_enter(&nvp->nvp_mutex);
1131
1132	/*
1133	 * This check seems to be done in the SATA module.
1134	 * It may not be required here
1135	 */
1136	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1137		nv_cmn_err(CE_WARN, nvc, nvp,
1138		    "port inactive.  Use cfgadm to activate");
1139		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1140		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1141		mutex_exit(&nvp->nvp_mutex);
1142
1143		return (SATA_FAILURE);
1144	}
1145
1146	if (qual == SATA_ADDR_PMPORT) {
1147		sd->satadev_type = SATA_DTYPE_NONE;
1148		sd->satadev_state = SATA_STATE_UNKNOWN;
1149		mutex_exit(&nvp->nvp_mutex);
1150		nv_cmn_err(CE_WARN, nvc, nvp,
1151		    "controller does not support port multiplier");
1152
1153		return (SATA_FAILURE);
1154	}
1155
1156	sd->satadev_state = SATA_PSTATE_PWRON;
1157
1158	nv_copy_registers(nvp, sd, NULL);
1159
1160	/*
1161	 * determine link status
1162	 */
1163	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1164		uint8_t det;
1165
1166		/*
1167		 * Reset will cause the link to go down for a short period of
1168		 * time.  If link is lost for less than 2 seconds ignore it
1169		 * so that the reset can progress.
1170		 */
1171		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1172
1173			if (nvp->nvp_link_lost_time == 0) {
1174				nvp->nvp_link_lost_time = nv_lbolt;
1175			}
1176
1177			if (TICK_TO_SEC(nv_lbolt -
1178			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1179				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1180				    "probe: intermittent link lost while"
1181				    " resetting"));
1182				/*
1183				 * fake status of link so that probe continues
1184				 */
1185				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1186				    SSTATUS_IPM_ACTIVE);
1187				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1188				    SSTATUS_DET_DEVPRE_PHYCOM);
1189				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1190				mutex_exit(&nvp->nvp_mutex);
1191
1192				return (SATA_SUCCESS);
1193			} else {
1194				nvp->nvp_state &=
1195				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1196			}
1197		}
1198
1199		/*
1200		 * no link, so tear down port and abort all active packets
1201		 */
1202
1203		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1204		    SSTATUS_DET_SHIFT;
1205
1206		switch (det) {
1207		case SSTATUS_DET_NODEV:
1208		case SSTATUS_DET_PHYOFFLINE:
1209			sd->satadev_type = SATA_DTYPE_NONE;
1210			break;
1211		default:
1212			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1213			break;
1214		}
1215
1216		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1217		    "probe: link lost invoking nv_abort_active"));
1218
1219		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1220		nv_uninit_port(nvp);
1221
1222		mutex_exit(&nvp->nvp_mutex);
1223
1224		return (SATA_SUCCESS);
1225	} else {
1226		nvp->nvp_link_lost_time = 0;
1227	}
1228
1229	/*
1230	 * A device is present so clear hotremoved flag
1231	 */
1232	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1233
1234#ifdef SGPIO_SUPPORT
1235	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1236	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1237#endif
1238
1239	/*
1240	 * If the signature was acquired previously there is no need to
1241	 * do it again.
1242	 */
1243	if (nvp->nvp_signature != 0) {
1244		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1245		    "probe: signature acquired previously"));
1246		sd->satadev_type = nvp->nvp_type;
1247		mutex_exit(&nvp->nvp_mutex);
1248
1249		return (SATA_SUCCESS);
1250	}
1251
1252	/*
1253	 * If NV_PORT_RESET is not set, this is the first time through
1254	 * so perform reset and return.
1255	 */
1256	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1257		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1258		    "probe: first reset to get sig"));
1259		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1260		nv_reset(nvp);
1261		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1262		nvp->nvp_probe_time = nv_lbolt;
1263		mutex_exit(&nvp->nvp_mutex);
1264
1265		return (SATA_SUCCESS);
1266	}
1267
1268	/*
1269	 * Reset was done previously.  see if the signature is
1270	 * available.
1271	 */
1272	nv_read_signature(nvp);
1273	sd->satadev_type = nvp->nvp_type;
1274
1275	/*
1276	 * Some drives may require additional resets to get a
1277	 * valid signature.  If a drive was not just powered up, the signature
1278	 * should arrive within half a second of reset.  Therefore if more
1279	 * than 5 seconds has elapsed while waiting for a signature, reset
1280	 * again.  These extra resets do not appear to create problems when
1281	 * the drive is spinning up for more than this reset period.
1282	 */
1283	if (nvp->nvp_signature == 0) {
1284		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1285			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1286			    " during signature acquisition"));
1287			nv_reset(nvp);
1288		}
1289
1290		mutex_exit(&nvp->nvp_mutex);
1291
1292		return (SATA_SUCCESS);
1293	}
1294
1295	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1296	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1297
1298	/*
1299	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1300	 * it is not either of those, then just return.
1301	 */
1302	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1303	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1304		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1305		    " disks/CDs/DVDs.  Signature acquired was %X",
1306		    nvp->nvp_signature));
1307		mutex_exit(&nvp->nvp_mutex);
1308
1309		return (SATA_SUCCESS);
1310	}
1311
1312	/*
1313	 * make sure structures are initialized
1314	 */
1315	if (nv_init_port(nvp) == NV_SUCCESS) {
1316		NVLOG((NVDBG_PROBE, nvc, nvp,
1317		    "device detected and set up at port %d", cport));
1318		mutex_exit(&nvp->nvp_mutex);
1319
1320		return (SATA_SUCCESS);
1321	} else {
1322		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1323		    "structures for port %d", cport);
1324		mutex_exit(&nvp->nvp_mutex);
1325
1326		return (SATA_FAILURE);
1327	}
1328	/*NOTREACHED*/
1329}
1330
1331
1332/*
1333 * Called by sata module to start a new command.
1334 */
1335static int
1336nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1337{
1338	int cport = spkt->satapkt_device.satadev_addr.cport;
1339	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1340	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1341	int ret;
1342
1343	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1344	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1345
1346	mutex_enter(&nvp->nvp_mutex);
1347
1348	/*
1349	 * hotremoved is an intermediate state where the link was lost,
1350	 * but the hotplug event has not yet been processed by the sata
1351	 * module.  Fail the request.
1352	 */
1353	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1354		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1355		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1356		NVLOG((NVDBG_ERRS, nvc, nvp,
1357		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1358		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1359		mutex_exit(&nvp->nvp_mutex);
1360
1361		return (SATA_TRAN_PORT_ERROR);
1362	}
1363
1364	if (nvp->nvp_state & NV_PORT_RESET) {
1365		NVLOG((NVDBG_ERRS, nvc, nvp,
1366		    "still waiting for reset completion"));
1367		spkt->satapkt_reason = SATA_PKT_BUSY;
1368		mutex_exit(&nvp->nvp_mutex);
1369
1370		/*
1371		 * If in panic, timeouts do not occur, so fake one
1372		 * so that the signature can be acquired to complete
1373		 * the reset handling.
1374		 */
1375		if (ddi_in_panic()) {
1376			nv_timeout(nvp);
1377		}
1378
1379		return (SATA_TRAN_BUSY);
1380	}
1381
1382	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1383		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1384		NVLOG((NVDBG_ERRS, nvc, nvp,
1385		    "nv_sata_start: SATA_DTYPE_NONE"));
1386		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1387		mutex_exit(&nvp->nvp_mutex);
1388
1389		return (SATA_TRAN_PORT_ERROR);
1390	}
1391
1392	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1393		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1394		nv_cmn_err(CE_WARN, nvc, nvp,
1395		    "port multipliers not supported by controller");
1396		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1397		mutex_exit(&nvp->nvp_mutex);
1398
1399		return (SATA_TRAN_CMD_UNSUPPORTED);
1400	}
1401
1402	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1403		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1404		NVLOG((NVDBG_ERRS, nvc, nvp,
1405		    "nv_sata_start: port not yet initialized"));
1406		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1407		mutex_exit(&nvp->nvp_mutex);
1408
1409		return (SATA_TRAN_PORT_ERROR);
1410	}
1411
1412	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1413		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1414		NVLOG((NVDBG_ERRS, nvc, nvp,
1415		    "nv_sata_start: NV_PORT_INACTIVE"));
1416		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1417		mutex_exit(&nvp->nvp_mutex);
1418
1419		return (SATA_TRAN_PORT_ERROR);
1420	}
1421
1422	if (nvp->nvp_state & NV_PORT_FAILED) {
1423		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1424		NVLOG((NVDBG_ERRS, nvc, nvp,
1425		    "nv_sata_start: NV_PORT_FAILED state"));
1426		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1427		mutex_exit(&nvp->nvp_mutex);
1428
1429		return (SATA_TRAN_PORT_ERROR);
1430	}
1431
1432	/*
1433	 * after a device reset, and then when sata module restore processing
1434	 * is complete, the sata module will set sata_clear_dev_reset which
1435	 * indicates that restore processing has completed and normal
1436	 * non-restore related commands should be processed.
1437	 */
1438	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1439		nvp->nvp_state &= ~NV_PORT_RESTORE;
1440		NVLOG((NVDBG_ENTRY, nvc, nvp,
1441		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1442	}
1443
1444	/*
1445	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1446	 * only allow commands which restore device state.  The sata module
1447	 * marks such commands with with sata_ignore_dev_reset.
1448	 *
1449	 * during coredump, nv_reset is called and but then the restore
1450	 * doesn't happen.  For now, workaround by ignoring the wait for
1451	 * restore if the system is panicing.
1452	 */
1453	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1454	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1455	    (ddi_in_panic() == 0)) {
1456		spkt->satapkt_reason = SATA_PKT_BUSY;
1457		NVLOG((NVDBG_ENTRY, nvc, nvp,
1458		    "nv_sata_start: waiting for restore "));
1459		mutex_exit(&nvp->nvp_mutex);
1460
1461		return (SATA_TRAN_BUSY);
1462	}
1463
1464	if (nvp->nvp_state & NV_PORT_ABORTING) {
1465		spkt->satapkt_reason = SATA_PKT_BUSY;
1466		NVLOG((NVDBG_ERRS, nvc, nvp,
1467		    "nv_sata_start: NV_PORT_ABORTING"));
1468		mutex_exit(&nvp->nvp_mutex);
1469
1470		return (SATA_TRAN_BUSY);
1471	}
1472
1473	if (spkt->satapkt_op_mode &
1474	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1475
1476		ret = nv_start_sync(nvp, spkt);
1477
1478		mutex_exit(&nvp->nvp_mutex);
1479
1480		return (ret);
1481	}
1482
1483	/*
1484	 * start command asynchronous command
1485	 */
1486	ret = nv_start_async(nvp, spkt);
1487
1488	mutex_exit(&nvp->nvp_mutex);
1489
1490	return (ret);
1491}
1492
1493
1494/*
1495 * SATA_OPMODE_POLLING implies the driver is in a
1496 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1497 * If only SATA_OPMODE_SYNCH is set, the driver can use
1498 * interrupts and sleep wait on a cv.
1499 *
1500 * If SATA_OPMODE_POLLING is set, the driver can't use
1501 * interrupts and must busy wait and simulate the
1502 * interrupts by waiting for BSY to be cleared.
1503 *
1504 * Synchronous mode has to return BUSY if there are
1505 * any other commands already on the drive.
1506 */
1507static int
1508nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1509{
1510	nv_ctl_t *nvc = nvp->nvp_ctlp;
1511	int ret;
1512
1513	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1514
1515	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1516		spkt->satapkt_reason = SATA_PKT_BUSY;
1517		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1518		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1519		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1520		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1521		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1522
1523		return (SATA_TRAN_BUSY);
1524	}
1525
1526	/*
1527	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1528	 */
1529	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1530	    servicing_interrupt()) {
1531		spkt->satapkt_reason = SATA_PKT_BUSY;
1532		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1533		    "SYNC mode not allowed during interrupt"));
1534
1535		return (SATA_TRAN_BUSY);
1536
1537	}
1538
1539	/*
1540	 * disable interrupt generation if in polled mode
1541	 */
1542	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1543		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1544	}
1545
1546	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1547		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1548			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1549		}
1550
1551		return (ret);
1552	}
1553
1554	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1555		mutex_exit(&nvp->nvp_mutex);
1556		ret = nv_poll_wait(nvp, spkt);
1557		mutex_enter(&nvp->nvp_mutex);
1558
1559		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1560
1561		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1562		    " done % reason %d", ret));
1563
1564		return (ret);
1565	}
1566
1567	/*
1568	 * non-polling synchronous mode handling.  The interrupt will signal
1569	 * when the IO is completed.
1570	 */
1571	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1572
1573	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1574
1575		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1576	}
1577
1578	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1579	    " done % reason %d", spkt->satapkt_reason));
1580
1581	return (SATA_TRAN_ACCEPTED);
1582}
1583
1584
1585static int
1586nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1587{
1588	int ret;
1589	nv_ctl_t *nvc = nvp->nvp_ctlp;
1590#if ! defined(__lock_lint)
1591	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1592#endif
1593
1594	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1595
1596	for (;;) {
1597
1598		NV_DELAY_NSEC(400);
1599
1600		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1601		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1602		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1603			mutex_enter(&nvp->nvp_mutex);
1604			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1605			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1606			nv_reset(nvp);
1607			nv_complete_io(nvp, spkt, 0);
1608			mutex_exit(&nvp->nvp_mutex);
1609			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1610			    "SATA_STATUS_BSY"));
1611
1612			return (SATA_TRAN_ACCEPTED);
1613		}
1614
1615		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1616
1617		/*
1618		 * Simulate interrupt.
1619		 */
1620		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1621		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1622
1623		if (ret != DDI_INTR_CLAIMED) {
1624			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1625			    " unclaimed -- resetting"));
1626			mutex_enter(&nvp->nvp_mutex);
1627			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1628			nv_reset(nvp);
1629			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1630			nv_complete_io(nvp, spkt, 0);
1631			mutex_exit(&nvp->nvp_mutex);
1632
1633			return (SATA_TRAN_ACCEPTED);
1634		}
1635
1636#if ! defined(__lock_lint)
1637		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1638			/*
1639			 * packet is complete
1640			 */
1641			return (SATA_TRAN_ACCEPTED);
1642		}
1643#endif
1644	}
1645	/*NOTREACHED*/
1646}
1647
1648
1649/*
1650 * Called by sata module to abort outstanding packets.
1651 */
1652/*ARGSUSED*/
1653static int
1654nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1655{
1656	int cport = spkt->satapkt_device.satadev_addr.cport;
1657	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1658	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1659	int c_a, ret;
1660
1661	ASSERT(cport < NV_MAX_PORTS(nvc));
1662	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1663
1664	mutex_enter(&nvp->nvp_mutex);
1665
1666	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1667		mutex_exit(&nvp->nvp_mutex);
1668		nv_cmn_err(CE_WARN, nvc, nvp,
1669		    "abort request failed: port inactive");
1670
1671		return (SATA_FAILURE);
1672	}
1673
1674	/*
1675	 * spkt == NULL then abort all commands
1676	 */
1677	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1678
1679	if (c_a) {
1680		NVLOG((NVDBG_ENTRY, nvc, nvp,
1681		    "packets aborted running=%d", c_a));
1682		ret = SATA_SUCCESS;
1683	} else {
1684		if (spkt == NULL) {
1685			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1686		} else {
1687			NVLOG((NVDBG_ENTRY, nvc, nvp,
1688			    "can't find spkt to abort"));
1689		}
1690		ret = SATA_FAILURE;
1691	}
1692
1693	mutex_exit(&nvp->nvp_mutex);
1694
1695	return (ret);
1696}
1697
1698
1699/*
1700 * if spkt == NULL abort all pkts running, otherwise
1701 * abort the requested packet.  must be called with nv_mutex
1702 * held and returns with it held.  Not NCQ aware.
1703 */
1704static int
1705nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1706{
1707	int aborted = 0, i, reset_once = B_FALSE;
1708	struct nv_slot *nv_slotp;
1709	sata_pkt_t *spkt_slot;
1710
1711	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1712
1713	/*
1714	 * return if the port is not configured
1715	 */
1716	if (nvp->nvp_slot == NULL) {
1717		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1718		    "nv_abort_active: not configured so returning"));
1719
1720		return (0);
1721	}
1722
1723	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1724
1725	nvp->nvp_state |= NV_PORT_ABORTING;
1726
1727	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1728
1729		nv_slotp = &(nvp->nvp_slot[i]);
1730		spkt_slot = nv_slotp->nvslot_spkt;
1731
1732		/*
1733		 * skip if not active command in slot
1734		 */
1735		if (spkt_slot == NULL) {
1736			continue;
1737		}
1738
1739		/*
1740		 * if a specific packet was requested, skip if
1741		 * this is not a match
1742		 */
1743		if ((spkt != NULL) && (spkt != spkt_slot)) {
1744			continue;
1745		}
1746
1747		/*
1748		 * stop the hardware.  This could need reworking
1749		 * when NCQ is enabled in the driver.
1750		 */
1751		if (reset_once == B_FALSE) {
1752			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1753
1754			/*
1755			 * stop DMA engine
1756			 */
1757			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1758
1759			nv_reset(nvp);
1760			reset_once = B_TRUE;
1761		}
1762
1763		spkt_slot->satapkt_reason = abort_reason;
1764		nv_complete_io(nvp, spkt_slot, i);
1765		aborted++;
1766	}
1767
1768	nvp->nvp_state &= ~NV_PORT_ABORTING;
1769
1770	return (aborted);
1771}
1772
1773
1774/*
1775 * Called by sata module to reset a port, device, or the controller.
1776 */
1777static int
1778nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1779{
1780	int cport = sd->satadev_addr.cport;
1781	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1782	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1783	int ret = SATA_SUCCESS;
1784
1785	ASSERT(cport < NV_MAX_PORTS(nvc));
1786
1787	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1788
1789	mutex_enter(&nvp->nvp_mutex);
1790
1791	switch (sd->satadev_addr.qual) {
1792
1793	case SATA_ADDR_CPORT:
1794		/*FALLTHROUGH*/
1795	case SATA_ADDR_DCPORT:
1796		nv_reset(nvp);
1797		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1798
1799		break;
1800	case SATA_ADDR_CNTRL:
1801		NVLOG((NVDBG_ENTRY, nvc, nvp,
1802		    "nv_sata_reset: constroller reset not supported"));
1803
1804		break;
1805	case SATA_ADDR_PMPORT:
1806	case SATA_ADDR_DPMPORT:
1807		NVLOG((NVDBG_ENTRY, nvc, nvp,
1808		    "nv_sata_reset: port multipliers not supported"));
1809		/*FALLTHROUGH*/
1810	default:
1811		/*
1812		 * unsupported case
1813		 */
1814		ret = SATA_FAILURE;
1815		break;
1816	}
1817
1818	if (ret == SATA_SUCCESS) {
1819		/*
1820		 * If the port is inactive, do a quiet reset and don't attempt
1821		 * to wait for reset completion or do any post reset processing
1822		 */
1823		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1824			nvp->nvp_state &= ~NV_PORT_RESET;
1825			nvp->nvp_reset_time = 0;
1826		}
1827
1828		/*
1829		 * clear the port failed flag
1830		 */
1831		nvp->nvp_state &= ~NV_PORT_FAILED;
1832	}
1833
1834	mutex_exit(&nvp->nvp_mutex);
1835
1836	return (ret);
1837}
1838
1839
1840/*
1841 * Sata entry point to handle port activation.  cfgadm -c connect
1842 */
1843static int
1844nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1845{
1846	int cport = sd->satadev_addr.cport;
1847	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1848	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1849
1850	ASSERT(cport < NV_MAX_PORTS(nvc));
1851	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1852
1853	mutex_enter(&nvp->nvp_mutex);
1854
1855	sd->satadev_state = SATA_STATE_READY;
1856
1857	nv_copy_registers(nvp, sd, NULL);
1858
1859	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1860
1861	nvp->nvp_state = 0;
1862
1863	mutex_exit(&nvp->nvp_mutex);
1864
1865	return (SATA_SUCCESS);
1866}
1867
1868
1869/*
1870 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1871 */
1872static int
1873nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1874{
1875	int cport = sd->satadev_addr.cport;
1876	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1877	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1878
1879	ASSERT(cport < NV_MAX_PORTS(nvc));
1880	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1881
1882	mutex_enter(&nvp->nvp_mutex);
1883
1884	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1885
1886	/*
1887	 * mark the device as inaccessible
1888	 */
1889	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1890
1891	/*
1892	 * disable the interrupts on port
1893	 */
1894	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1895
1896	nv_uninit_port(nvp);
1897
1898	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1899	nv_copy_registers(nvp, sd, NULL);
1900
1901	mutex_exit(&nvp->nvp_mutex);
1902
1903	return (SATA_SUCCESS);
1904}
1905
1906
1907/*
1908 * find an empty slot in the driver's queue, increment counters,
1909 * and then invoke the appropriate PIO or DMA start routine.
1910 */
1911static int
1912nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1913{
1914	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1915	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1916	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1917	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1918	nv_ctl_t *nvc = nvp->nvp_ctlp;
1919	nv_slot_t *nv_slotp;
1920	boolean_t dma_cmd;
1921
1922	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1923	    sata_cmdp->satacmd_cmd_reg));
1924
1925	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1926	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1927		nvp->nvp_ncq_run++;
1928		/*
1929		 * search for an empty NCQ slot.  by the time, it's already
1930		 * been determined by the caller that there is room on the
1931		 * queue.
1932		 */
1933		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1934		    on_bit <<= 1) {
1935			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1936				break;
1937			}
1938		}
1939
1940		/*
1941		 * the first empty slot found, should not exceed the queue
1942		 * depth of the drive.  if it does it's an error.
1943		 */
1944		ASSERT(slot != nvp->nvp_queue_depth);
1945
1946		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1947		    nvp->nvp_sactive);
1948		ASSERT((sactive & on_bit) == 0);
1949		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1950		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1951		    on_bit));
1952		nvp->nvp_sactive_cache |= on_bit;
1953
1954		ncq = NVSLOT_NCQ;
1955
1956	} else {
1957		nvp->nvp_non_ncq_run++;
1958		slot = 0;
1959	}
1960
1961	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1962
1963	ASSERT(nv_slotp->nvslot_spkt == NULL);
1964
1965	nv_slotp->nvslot_spkt = spkt;
1966	nv_slotp->nvslot_flags = ncq;
1967
1968	/*
1969	 * the sata module doesn't indicate which commands utilize the
1970	 * DMA engine, so find out using this switch table.
1971	 */
1972	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1973	case SATAC_READ_DMA_EXT:
1974	case SATAC_WRITE_DMA_EXT:
1975	case SATAC_WRITE_DMA:
1976	case SATAC_READ_DMA:
1977	case SATAC_READ_DMA_QUEUED:
1978	case SATAC_READ_DMA_QUEUED_EXT:
1979	case SATAC_WRITE_DMA_QUEUED:
1980	case SATAC_WRITE_DMA_QUEUED_EXT:
1981	case SATAC_READ_FPDMA_QUEUED:
1982	case SATAC_WRITE_FPDMA_QUEUED:
1983		dma_cmd = B_TRUE;
1984		break;
1985	default:
1986		dma_cmd = B_FALSE;
1987	}
1988
1989	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1990		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1991		nv_slotp->nvslot_start = nv_start_dma;
1992		nv_slotp->nvslot_intr = nv_intr_dma;
1993	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1994		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1995		nv_slotp->nvslot_start = nv_start_pkt_pio;
1996		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1997		if ((direction == SATA_DIR_READ) ||
1998		    (direction == SATA_DIR_WRITE)) {
1999			nv_slotp->nvslot_byte_count =
2000			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2001			nv_slotp->nvslot_v_addr =
2002			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2003			/*
2004			 * Freeing DMA resources allocated by the framework
2005			 * now to avoid buffer overwrite (dma sync) problems
2006			 * when the buffer is released at command completion.
2007			 * Primarily an issue on systems with more than
2008			 * 4GB of memory.
2009			 */
2010			sata_free_dma_resources(spkt);
2011		}
2012	} else if (direction == SATA_DIR_NODATA_XFER) {
2013		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2014		nv_slotp->nvslot_start = nv_start_nodata;
2015		nv_slotp->nvslot_intr = nv_intr_nodata;
2016	} else if (direction == SATA_DIR_READ) {
2017		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2018		nv_slotp->nvslot_start = nv_start_pio_in;
2019		nv_slotp->nvslot_intr = nv_intr_pio_in;
2020		nv_slotp->nvslot_byte_count =
2021		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2022		nv_slotp->nvslot_v_addr =
2023		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2024		/*
2025		 * Freeing DMA resources allocated by the framework now to
2026		 * avoid buffer overwrite (dma sync) problems when the buffer
2027		 * is released at command completion.  This is not an issue
2028		 * for write because write does not update the buffer.
2029		 * Primarily an issue on systems with more than 4GB of memory.
2030		 */
2031		sata_free_dma_resources(spkt);
2032	} else if (direction == SATA_DIR_WRITE) {
2033		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2034		nv_slotp->nvslot_start = nv_start_pio_out;
2035		nv_slotp->nvslot_intr = nv_intr_pio_out;
2036		nv_slotp->nvslot_byte_count =
2037		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2038		nv_slotp->nvslot_v_addr =
2039		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2040	} else {
2041		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2042		    " %d cookies %d cmd %x",
2043		    sata_cmdp->satacmd_flags.sata_data_direction,
2044		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2045		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2046		ret = SATA_TRAN_CMD_UNSUPPORTED;
2047
2048		goto fail;
2049	}
2050
2051	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2052	    SATA_TRAN_ACCEPTED) {
2053#ifdef SGPIO_SUPPORT
2054		nv_sgp_drive_active(nvp->nvp_ctlp,
2055		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2056#endif
2057		nv_slotp->nvslot_stime = ddi_get_lbolt();
2058
2059		/*
2060		 * start timer if it's not already running and this packet
2061		 * is not requesting polled mode.
2062		 */
2063		if ((nvp->nvp_timeout_id == 0) &&
2064		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2065			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2066			    drv_usectohz(NV_ONE_SEC));
2067		}
2068
2069		return (SATA_TRAN_ACCEPTED);
2070	}
2071
2072	fail:
2073
2074	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2075
2076	if (ncq == NVSLOT_NCQ) {
2077		nvp->nvp_ncq_run--;
2078		nvp->nvp_sactive_cache &= ~on_bit;
2079	} else {
2080		nvp->nvp_non_ncq_run--;
2081	}
2082	nv_slotp->nvslot_spkt = NULL;
2083	nv_slotp->nvslot_flags = 0;
2084
2085	return (ret);
2086}
2087
2088
2089/*
2090 * Check if the signature is ready and if non-zero translate
2091 * it into a solaris sata defined type.
2092 */
2093static void
2094nv_read_signature(nv_port_t *nvp)
2095{
2096	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2097
2098	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2099	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2100	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2101	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2102
2103	switch (nvp->nvp_signature) {
2104
2105	case NV_SIG_DISK:
2106		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2107		nvp->nvp_type = SATA_DTYPE_ATADISK;
2108		break;
2109	case NV_SIG_ATAPI:
2110		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2111		    "drive is an optical device"));
2112		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2113		break;
2114	case NV_SIG_PM:
2115		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2116		    "device is a port multiplier"));
2117		nvp->nvp_type = SATA_DTYPE_PMULT;
2118		break;
2119	case NV_SIG_NOTREADY:
2120		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2121		    "signature not ready"));
2122		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2123		break;
2124	default:
2125		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2126		    " recognized", nvp->nvp_signature);
2127		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2128		break;
2129	}
2130
2131	if (nvp->nvp_signature) {
2132		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2133	}
2134}
2135
2136
2137/*
2138 * Reset the port
2139 */
2140static void
2141nv_reset(nv_port_t *nvp)
2142{
2143	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2144	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2145	nv_ctl_t *nvc = nvp->nvp_ctlp;
2146	uint32_t sctrl;
2147
2148	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2149
2150	ASSERT(mutex_owned(&nvp->nvp_mutex));
2151
2152	/*
2153	 * clear signature registers
2154	 */
2155	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2156	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2157	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2158	nv_put8(cmdhdl, nvp->nvp_count, 0);
2159
2160	nvp->nvp_signature = 0;
2161	nvp->nvp_type = 0;
2162	nvp->nvp_state |= NV_PORT_RESET;
2163	nvp->nvp_reset_time = ddi_get_lbolt();
2164	nvp->nvp_link_lost_time = 0;
2165
2166	/*
2167	 * assert reset in PHY by writing a 1 to bit 0 scontrol
2168	 */
2169	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2170
2171	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2172
2173	/*
2174	 * wait 1ms
2175	 */
2176	drv_usecwait(1000);
2177
2178	/*
2179	 * de-assert reset in PHY
2180	 */
2181	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2182
2183	/*
2184	 * make sure timer is running
2185	 */
2186	if (nvp->nvp_timeout_id == 0) {
2187		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2188		    drv_usectohz(NV_ONE_SEC));
2189	}
2190}
2191
2192
2193/*
2194 * Initialize register handling specific to mcp51/mcp55
2195 */
2196/* ARGSUSED */
2197static void
2198mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2199{
2200	nv_port_t *nvp;
2201	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2202	uint8_t off, port;
2203
2204	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2205	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2206
2207	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2208		nvp = &(nvc->nvc_port[port]);
2209		nvp->nvp_mcp5x_int_status =
2210		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2211		nvp->nvp_mcp5x_int_ctl =
2212		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2213
2214		/*
2215		 * clear any previous interrupts asserted
2216		 */
2217		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2218		    MCP5X_INT_CLEAR);
2219
2220		/*
2221		 * These are the interrupts to accept for now.  The spec
2222		 * says these are enable bits, but nvidia has indicated
2223		 * these are masking bits.  Even though they may be masked
2224		 * out to prevent asserting the main interrupt, they can
2225		 * still be asserted while reading the interrupt status
2226		 * register, so that needs to be considered in the interrupt
2227		 * handler.
2228		 */
2229		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2230		    ~(MCP5X_INT_IGNORE));
2231	}
2232
2233	/*
2234	 * Allow the driver to program the BM on the first command instead
2235	 * of waiting for an interrupt.
2236	 */
2237#ifdef NCQ
2238	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2239	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2240	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2241	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2242#endif
2243
2244	/*
2245	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2246	 * Enable DMA to take advantage of that.
2247	 *
2248	 */
2249	if (nvc->nvc_revid >= 0xa3) {
2250		if (nv_sata_40bit_dma == B_TRUE) {
2251			uint32_t reg32;
2252			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2253			    "rev id is %X and"
2254			    " is capable of 40-bit DMA addressing",
2255			    nvc->nvc_revid));
2256			nvc->dma_40bit = B_TRUE;
2257			reg32 = pci_config_get32(pci_conf_handle,
2258			    NV_SATA_CFG_20);
2259			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2260			    reg32 | NV_40BIT_PRD);
2261		} else {
2262			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2263			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2264		}
2265	} else {
2266		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2267		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2268	}
2269}
2270
2271
2272/*
2273 * Initialize register handling specific to ck804
2274 */
2275static void
2276ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2277{
2278	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2279	uint32_t reg32;
2280	uint16_t reg16;
2281	nv_port_t *nvp;
2282	int j;
2283
2284	/*
2285	 * delay hotplug interrupts until PHYRDY.
2286	 */
2287	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2288	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2289	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2290
2291	/*
2292	 * enable hot plug interrupts for channel x and y
2293	 */
2294	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2295	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2296	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2297	    NV_HIRQ_EN | reg16);
2298
2299
2300	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2301	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2302	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2303	    NV_HIRQ_EN | reg16);
2304
2305	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2306
2307	/*
2308	 * clear any existing interrupt pending then enable
2309	 */
2310	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2311		nvp = &(nvc->nvc_port[j]);
2312		mutex_enter(&nvp->nvp_mutex);
2313		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2314		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2315		mutex_exit(&nvp->nvp_mutex);
2316	}
2317}
2318
2319
2320/*
2321 * Initialize the controller and set up driver data structures.
2322 * determine if ck804 or mcp5x class.
2323 */
2324static int
2325nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2326{
2327	struct sata_hba_tran stran;
2328	nv_port_t *nvp;
2329	int j, ck804;
2330	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2331	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2332	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2333	uint32_t reg32;
2334	uint8_t reg8, reg8_save;
2335
2336	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2337
2338	ck804 = B_TRUE;
2339#ifdef SGPIO_SUPPORT
2340	nvc->nvc_mcp5x_flag = B_FALSE;
2341#endif
2342
2343	/*
2344	 * Need to set bit 2 to 1 at config offset 0x50
2345	 * to enable access to the bar5 registers.
2346	 */
2347	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2348	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2349		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2350		    reg32 | NV_BAR5_SPACE_EN);
2351	}
2352
2353	/*
2354	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2355	 * task file registers into bar5 while mcp5x won't.  The offset of
2356	 * the task file registers in mcp5x's space is unused, so it will
2357	 * return zero.  So check one of the task file registers to see if it is
2358	 * writable and reads back what was written.  If it's mcp5x it will
2359	 * return back 0xff whereas ck804 will return the value written.
2360	 */
2361	reg8_save = nv_get8(bar5_hdl,
2362	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2363
2364
2365	for (j = 1; j < 3; j++) {
2366
2367		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2368		reg8 = nv_get8(bar5_hdl,
2369		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2370
2371		if (reg8 != j) {
2372			ck804 = B_FALSE;
2373			nvc->nvc_mcp5x_flag = B_TRUE;
2374			break;
2375		}
2376	}
2377
2378	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2379
2380	if (ck804 == B_TRUE) {
2381		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2382		nvc->nvc_interrupt = ck804_intr;
2383		nvc->nvc_reg_init = ck804_reg_init;
2384		nvc->nvc_set_intr = ck804_set_intr;
2385	} else {
2386		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2387		nvc->nvc_interrupt = mcp5x_intr;
2388		nvc->nvc_reg_init = mcp5x_reg_init;
2389		nvc->nvc_set_intr = mcp5x_set_intr;
2390	}
2391
2392
2393	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2394	stran.sata_tran_hba_dip = nvc->nvc_dip;
2395	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2396	stran.sata_tran_hba_features_support =
2397	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2398	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2399	stran.sata_tran_probe_port = nv_sata_probe;
2400	stran.sata_tran_start = nv_sata_start;
2401	stran.sata_tran_abort = nv_sata_abort;
2402	stran.sata_tran_reset_dport = nv_sata_reset;
2403	stran.sata_tran_selftest = NULL;
2404	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2405	stran.sata_tran_pwrmgt_ops = NULL;
2406	stran.sata_tran_ioctl = NULL;
2407	nvc->nvc_sata_hba_tran = stran;
2408
2409	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2410	    KM_SLEEP);
2411
2412	/*
2413	 * initialize registers common to all chipsets
2414	 */
2415	nv_common_reg_init(nvc);
2416
2417	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2418		nvp = &(nvc->nvc_port[j]);
2419
2420		cmd_addr = nvp->nvp_cmd_addr;
2421		ctl_addr = nvp->nvp_ctl_addr;
2422		bm_addr = nvp->nvp_bm_addr;
2423
2424		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2425		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2426
2427		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2428
2429		nvp->nvp_data	= cmd_addr + NV_DATA;
2430		nvp->nvp_error	= cmd_addr + NV_ERROR;
2431		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2432		nvp->nvp_count	= cmd_addr + NV_COUNT;
2433		nvp->nvp_sect	= cmd_addr + NV_SECT;
2434		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2435		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2436		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2437		nvp->nvp_status	= cmd_addr + NV_STATUS;
2438		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2439		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2440		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2441
2442		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2443		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2444		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2445
2446		nvp->nvp_state = 0;
2447	}
2448
2449	/*
2450	 * initialize register by calling chip specific reg initialization
2451	 */
2452	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2453
2454	/* initialize the hba dma attribute */
2455	if (nvc->dma_40bit == B_TRUE)
2456		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2457		    &buffer_dma_40bit_attr;
2458	else
2459		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2460		    &buffer_dma_attr;
2461
2462	return (NV_SUCCESS);
2463}
2464
2465
2466/*
2467 * Initialize data structures with enough slots to handle queuing, if
2468 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2469 * NCQ support is built into the driver and enabled.  It might have been
2470 * better to derive the true size from the drive itself, but the sata
2471 * module only sends down that information on the first NCQ command,
2472 * which means possibly re-sizing the structures on an interrupt stack,
2473 * making error handling more messy.  The easy way is to just allocate
2474 * all 32 slots, which is what most drives support anyway.
2475 */
2476static int
2477nv_init_port(nv_port_t *nvp)
2478{
2479	nv_ctl_t *nvc = nvp->nvp_ctlp;
2480	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2481	dev_info_t *dip = nvc->nvc_dip;
2482	ddi_device_acc_attr_t dev_attr;
2483	size_t buf_size;
2484	ddi_dma_cookie_t cookie;
2485	uint_t count;
2486	int rc, i;
2487
2488	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2489	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2490	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2491
2492	if (nvp->nvp_state & NV_PORT_INIT) {
2493		NVLOG((NVDBG_INIT, nvc, nvp,
2494		    "nv_init_port previously initialized"));
2495
2496		return (NV_SUCCESS);
2497	} else {
2498		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2499	}
2500
2501	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2502	    NV_QUEUE_SLOTS, KM_SLEEP);
2503
2504	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2505	    NV_QUEUE_SLOTS, KM_SLEEP);
2506
2507	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2508	    NV_QUEUE_SLOTS, KM_SLEEP);
2509
2510	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2511	    NV_QUEUE_SLOTS, KM_SLEEP);
2512
2513	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2514	    KM_SLEEP);
2515
2516	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2517
2518		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2519		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2520
2521		if (rc != DDI_SUCCESS) {
2522			nv_uninit_port(nvp);
2523
2524			return (NV_FAILURE);
2525		}
2526
2527		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2528		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2529		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2530		    &(nvp->nvp_sg_acc_hdl[i]));
2531
2532		if (rc != DDI_SUCCESS) {
2533			nv_uninit_port(nvp);
2534
2535			return (NV_FAILURE);
2536		}
2537
2538		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2539		    nvp->nvp_sg_addr[i], buf_size,
2540		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2541		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2542
2543		if (rc != DDI_DMA_MAPPED) {
2544			nv_uninit_port(nvp);
2545
2546			return (NV_FAILURE);
2547		}
2548
2549		ASSERT(count == 1);
2550		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2551
2552		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2553
2554		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2555	}
2556
2557	/*
2558	 * nvp_queue_depth represents the actual drive queue depth, not the
2559	 * number of slots allocated in the structures (which may be more).
2560	 * Actual queue depth is only learned after the first NCQ command, so
2561	 * initialize it to 1 for now.
2562	 */
2563	nvp->nvp_queue_depth = 1;
2564
2565	nvp->nvp_state |= NV_PORT_INIT;
2566
2567	return (NV_SUCCESS);
2568}
2569
2570
2571/*
2572 * Free dynamically allocated structures for port.
2573 */
2574static void
2575nv_uninit_port(nv_port_t *nvp)
2576{
2577	int i;
2578
2579	/*
2580	 * It is possible to reach here before a port has been initialized or
2581	 * after it has already been uninitialized.  Just return in that case.
2582	 */
2583	if (nvp->nvp_slot == NULL) {
2584
2585		return;
2586	}
2587
2588	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2589	    "nv_uninit_port uninitializing"));
2590
2591	nvp->nvp_type = SATA_DTYPE_NONE;
2592
2593	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2594		if (nvp->nvp_sg_paddr[i]) {
2595			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2596		}
2597
2598		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2599			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2600		}
2601
2602		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2603			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2604		}
2605	}
2606
2607	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2608	nvp->nvp_slot = NULL;
2609
2610	kmem_free(nvp->nvp_sg_dma_hdl,
2611	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2612	nvp->nvp_sg_dma_hdl = NULL;
2613
2614	kmem_free(nvp->nvp_sg_acc_hdl,
2615	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2616	nvp->nvp_sg_acc_hdl = NULL;
2617
2618	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2619	nvp->nvp_sg_addr = NULL;
2620
2621	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2622	nvp->nvp_sg_paddr = NULL;
2623
2624	nvp->nvp_state &= ~NV_PORT_INIT;
2625	nvp->nvp_signature = 0;
2626}
2627
2628
2629/*
2630 * Cache register offsets and access handles to frequently accessed registers
2631 * which are common to either chipset.
2632 */
2633static void
2634nv_common_reg_init(nv_ctl_t *nvc)
2635{
2636	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2637	uchar_t *bm_addr_offset, *sreg_offset;
2638	uint8_t bar, port;
2639	nv_port_t *nvp;
2640
2641	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2642		if (port == 0) {
2643			bar = NV_BAR_0;
2644			bm_addr_offset = 0;
2645			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2646		} else {
2647			bar = NV_BAR_2;
2648			bm_addr_offset = (uchar_t *)8;
2649			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2650		}
2651
2652		nvp = &(nvc->nvc_port[port]);
2653		nvp->nvp_ctlp = nvc;
2654		nvp->nvp_port_num = port;
2655		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2656
2657		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2658		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2659		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2660		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2661		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2662		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2663		    (long)bm_addr_offset;
2664
2665		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2666		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2667		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2668		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2669	}
2670}
2671
2672
2673static void
2674nv_uninit_ctl(nv_ctl_t *nvc)
2675{
2676	int port;
2677	nv_port_t *nvp;
2678
2679	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2680
2681	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2682		nvp = &(nvc->nvc_port[port]);
2683		mutex_enter(&nvp->nvp_mutex);
2684		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2685		nv_uninit_port(nvp);
2686		mutex_exit(&nvp->nvp_mutex);
2687		mutex_destroy(&nvp->nvp_mutex);
2688		cv_destroy(&nvp->nvp_poll_cv);
2689	}
2690
2691	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2692	nvc->nvc_port = NULL;
2693}
2694
2695
2696/*
2697 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2698 * that interrupts from other devices can be disregarded while dtracing.
2699 */
2700/* ARGSUSED */
2701static uint_t
2702ck804_intr(caddr_t arg1, caddr_t arg2)
2703{
2704	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2705	uint8_t intr_status;
2706	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2707
2708	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2709		return (DDI_INTR_UNCLAIMED);
2710
2711	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2712
2713	if (intr_status == 0) {
2714
2715		return (DDI_INTR_UNCLAIMED);
2716	}
2717
2718	ck804_intr_process(nvc, intr_status);
2719
2720	return (DDI_INTR_CLAIMED);
2721}
2722
2723
2724/*
2725 * Main interrupt handler for ck804.  handles normal device
2726 * interrupts as well as port hot plug and remove interrupts.
2727 *
2728 */
2729static void
2730ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2731{
2732
2733	int port, i;
2734	nv_port_t *nvp;
2735	nv_slot_t *nv_slotp;
2736	uchar_t	status;
2737	sata_pkt_t *spkt;
2738	uint8_t bmstatus, clear_bits;
2739	ddi_acc_handle_t bmhdl;
2740	int nvcleared = 0;
2741	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2742	uint32_t sstatus;
2743	int port_mask_hot[] = {
2744		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2745	};
2746	int port_mask_pm[] = {
2747		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2748	};
2749
2750	NVLOG((NVDBG_INTR, nvc, NULL,
2751	    "ck804_intr_process entered intr_status=%x", intr_status));
2752
2753	/*
2754	 * For command completion interrupt, explicit clear is not required.
2755	 * however, for the error cases explicit clear is performed.
2756	 */
2757	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2758
2759		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
2760
2761		if ((port_mask[port] & intr_status) == 0) {
2762			continue;
2763		}
2764
2765		NVLOG((NVDBG_INTR, nvc, NULL,
2766		    "ck804_intr_process interrupt on port %d", port));
2767
2768		nvp = &(nvc->nvc_port[port]);
2769
2770		mutex_enter(&nvp->nvp_mutex);
2771
2772		/*
2773		 * there was a corner case found where an interrupt
2774		 * arrived before nvp_slot was set.  Should
2775		 * probably should track down why that happens and try
2776		 * to eliminate that source and then get rid of this
2777		 * check.
2778		 */
2779		if (nvp->nvp_slot == NULL) {
2780			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2781			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2782			    "received before initialization "
2783			    "completed status=%x", status));
2784			mutex_exit(&nvp->nvp_mutex);
2785
2786			/*
2787			 * clear interrupt bits
2788			 */
2789			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2790			    port_mask[port]);
2791
2792			continue;
2793		}
2794
2795		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2796			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2797			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2798			    " no command in progress status=%x", status));
2799			mutex_exit(&nvp->nvp_mutex);
2800
2801			/*
2802			 * clear interrupt bits
2803			 */
2804			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2805			    port_mask[port]);
2806
2807			continue;
2808		}
2809
2810		bmhdl = nvp->nvp_bm_hdl;
2811		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2812
2813		if (!(bmstatus & BMISX_IDEINTS)) {
2814			mutex_exit(&nvp->nvp_mutex);
2815
2816			continue;
2817		}
2818
2819		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2820
2821		if (status & SATA_STATUS_BSY) {
2822			mutex_exit(&nvp->nvp_mutex);
2823
2824			continue;
2825		}
2826
2827		nv_slotp = &(nvp->nvp_slot[0]);
2828
2829		ASSERT(nv_slotp);
2830
2831		spkt = nv_slotp->nvslot_spkt;
2832
2833		if (spkt == NULL) {
2834			mutex_exit(&nvp->nvp_mutex);
2835
2836			continue;
2837		}
2838
2839		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2840
2841		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2842
2843		/*
2844		 * If there is no link cannot be certain about the completion
2845		 * of the packet, so abort it.
2846		 */
2847		if (nv_check_link((&spkt->satapkt_device)->
2848		    satadev_scr.sstatus) == B_FALSE) {
2849
2850			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2851
2852		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2853
2854			nv_complete_io(nvp, spkt, 0);
2855		}
2856
2857		mutex_exit(&nvp->nvp_mutex);
2858	}
2859
2860	/*
2861	 * ck804 often doesn't correctly distinguish hot add/remove
2862	 * interrupts.  Frequently both the ADD and the REMOVE bits
2863	 * are asserted, whether it was a remove or add.  Use sstatus
2864	 * to distinguish hot add from hot remove.
2865	 */
2866
2867	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2868		clear_bits = 0;
2869
2870		nvp = &(nvc->nvc_port[port]);
2871		mutex_enter(&nvp->nvp_mutex);
2872
2873		if ((port_mask_pm[port] & intr_status) != 0) {
2874			clear_bits = port_mask_pm[port];
2875			NVLOG((NVDBG_HOT, nvc, nvp,
2876			    "clearing PM interrupt bit: %x",
2877			    intr_status & port_mask_pm[port]));
2878		}
2879
2880		if ((port_mask_hot[port] & intr_status) == 0) {
2881			if (clear_bits != 0) {
2882				goto clear;
2883			} else {
2884				mutex_exit(&nvp->nvp_mutex);
2885				continue;
2886			}
2887		}
2888
2889		/*
2890		 * reaching here means there was a hot add or remove.
2891		 */
2892		clear_bits |= port_mask_hot[port];
2893
2894		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2895
2896		sstatus = nv_get32(bar5_hdl,
2897		    nvc->nvc_port[port].nvp_sstatus);
2898
2899		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2900		    SSTATUS_DET_DEVPRE_PHYCOM) {
2901			nv_report_add_remove(nvp, 0);
2902		} else {
2903			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2904		}
2905	clear:
2906		/*
2907		 * clear interrupt bits.  explicit interrupt clear is
2908		 * required for hotplug interrupts.
2909		 */
2910		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
2911
2912		/*
2913		 * make sure it's flushed and cleared.  If not try
2914		 * again.  Sometimes it has been observed to not clear
2915		 * on the first try.
2916		 */
2917		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2918
2919		/*
2920		 * make 10 additional attempts to clear the interrupt
2921		 */
2922		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2923			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2924			    "still not clear try=%d", intr_status,
2925			    ++nvcleared));
2926			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2927			    clear_bits);
2928			intr_status = nv_get8(bar5_hdl,
2929			    nvc->nvc_ck804_int_status);
2930		}
2931
2932		/*
2933		 * if still not clear, log a message and disable the
2934		 * port. highly unlikely that this path is taken, but it
2935		 * gives protection against a wedged interrupt.
2936		 */
2937		if (intr_status & clear_bits) {
2938			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2939			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2940			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2941			nvp->nvp_state |= NV_PORT_FAILED;
2942			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2943			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2944			    "interrupt.  disabling port intr_status=%X",
2945			    intr_status);
2946		}
2947
2948		mutex_exit(&nvp->nvp_mutex);
2949	}
2950}
2951
2952
2953/*
2954 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
2955 * on the controller, to handle completion and hot plug and remove events.
2956 *
2957 */
2958static uint_t
2959mcp5x_intr_port(nv_port_t *nvp)
2960{
2961	nv_ctl_t *nvc = nvp->nvp_ctlp;
2962	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2963	uint8_t clear = 0, intr_cycles = 0;
2964	int ret = DDI_INTR_UNCLAIMED;
2965	uint16_t int_status;
2966
2967	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port entered"));
2968
2969	for (;;) {
2970		/*
2971		 * read current interrupt status
2972		 */
2973		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
2974
2975		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2976
2977		/*
2978		 * MCP5X_INT_IGNORE interrupts will show up in the status,
2979		 * but are masked out from causing an interrupt to be generated
2980		 * to the processor.  Ignore them here by masking them out.
2981		 */
2982		int_status &= ~(MCP5X_INT_IGNORE);
2983
2984		/*
2985		 * exit the loop when no more interrupts to process
2986		 */
2987		if (int_status == 0) {
2988
2989			break;
2990		}
2991
2992		if (int_status & MCP5X_INT_COMPLETE) {
2993			NVLOG((NVDBG_INTR, nvc, nvp,
2994			    "mcp5x_packet_complete_intr"));
2995			/*
2996			 * since int_status was set, return DDI_INTR_CLAIMED
2997			 * from the DDI's perspective even though the packet
2998			 * completion may not have succeeded.  If it fails,
2999			 * need to manually clear the interrupt, otherwise
3000			 * clearing is implicit.
3001			 */
3002			ret = DDI_INTR_CLAIMED;
3003			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3004			    NV_FAILURE) {
3005				clear = MCP5X_INT_COMPLETE;
3006			} else {
3007				intr_cycles = 0;
3008			}
3009		}
3010
3011		if (int_status & MCP5X_INT_DMA_SETUP) {
3012			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3013
3014			/*
3015			 * Needs to be cleared before starting the BM, so do it
3016			 * now.  make sure this is still working.
3017			 */
3018			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3019			    MCP5X_INT_DMA_SETUP);
3020#ifdef NCQ
3021			ret = mcp5x_dma_setup_intr(nvc, nvp);
3022#endif
3023		}
3024
3025		if (int_status & MCP5X_INT_REM) {
3026			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x device removed"));
3027			clear = MCP5X_INT_REM;
3028			ret = DDI_INTR_CLAIMED;
3029
3030			mutex_enter(&nvp->nvp_mutex);
3031			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3032			mutex_exit(&nvp->nvp_mutex);
3033
3034		} else if (int_status & MCP5X_INT_ADD) {
3035			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3036			clear = MCP5X_INT_ADD;
3037			ret = DDI_INTR_CLAIMED;
3038
3039			mutex_enter(&nvp->nvp_mutex);
3040			nv_report_add_remove(nvp, 0);
3041			mutex_exit(&nvp->nvp_mutex);
3042		}
3043
3044		if (clear) {
3045			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3046			clear = 0;
3047		}
3048
3049		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3050			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3051			    "processing.  Disabling port int_status=%X"
3052			    " clear=%X", int_status, clear);
3053			mutex_enter(&nvp->nvp_mutex);
3054			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3055			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3056			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3057			nvp->nvp_state |= NV_PORT_FAILED;
3058			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3059			mutex_exit(&nvp->nvp_mutex);
3060		}
3061	}
3062
3063	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port: finished ret=%d", ret));
3064
3065	return (ret);
3066}
3067
3068
3069/* ARGSUSED */
3070static uint_t
3071mcp5x_intr(caddr_t arg1, caddr_t arg2)
3072{
3073	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3074	int ret;
3075
3076	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3077		return (DDI_INTR_UNCLAIMED);
3078
3079	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3080	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3081
3082	return (ret);
3083}
3084
3085
3086#ifdef NCQ
3087/*
3088 * with software driven NCQ on mcp5x, an interrupt occurs right
3089 * before the drive is ready to do a DMA transfer.  At this point,
3090 * the PRD table needs to be programmed and the DMA engine enabled
3091 * and ready to go.
3092 *
3093 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3094 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3095 * -- clear bit 0 of master command reg
3096 * -- program PRD
3097 * -- clear the interrupt status bit for the DMA Setup FIS
3098 * -- set bit 0 of the bus master command register
3099 */
3100static int
3101mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3102{
3103	int slot;
3104	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3105	uint8_t bmicx;
3106	int port = nvp->nvp_port_num;
3107	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3108	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3109
3110	nv_cmn_err(CE_PANIC, nvc, nvp,
3111	    "this is should not be executed at all until NCQ");
3112
3113	mutex_enter(&nvp->nvp_mutex);
3114
3115	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3116
3117	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3118
3119	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3120	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3121
3122	/*
3123	 * halt the DMA engine.  This step is necessary according to
3124	 * the mcp5x spec, probably since there may have been a "first" packet
3125	 * that already programmed the DMA engine, but may not turn out to
3126	 * be the first one processed.
3127	 */
3128	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3129
3130#if 0
3131	if (bmicx & BMICX_SSBM) {
3132		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3133		    "another packet.  Cancelling and reprogramming"));
3134		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3135	}
3136#endif
3137	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3138
3139	nv_start_dma_engine(nvp, slot);
3140
3141	mutex_exit(&nvp->nvp_mutex);
3142
3143	return (DDI_INTR_CLAIMED);
3144}
3145#endif /* NCQ */
3146
3147
3148/*
3149 * packet completion interrupt.  If the packet is complete, invoke
3150 * the packet completion callback.
3151 */
3152static int
3153mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3154{
3155	uint8_t status, bmstatus;
3156	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3157	int sactive;
3158	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3159	sata_pkt_t *spkt;
3160	nv_slot_t *nv_slotp;
3161
3162	mutex_enter(&nvp->nvp_mutex);
3163
3164	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3165
3166	if (!(bmstatus & BMISX_IDEINTS)) {
3167		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3168		mutex_exit(&nvp->nvp_mutex);
3169
3170		return (NV_FAILURE);
3171	}
3172
3173	/*
3174	 * If the just completed item is a non-ncq command, the busy
3175	 * bit should not be set
3176	 */
3177	if (nvp->nvp_non_ncq_run) {
3178		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3179		if (status & SATA_STATUS_BSY) {
3180			nv_cmn_err(CE_WARN, nvc, nvp,
3181			    "unexpected SATA_STATUS_BSY set");
3182			mutex_exit(&nvp->nvp_mutex);
3183			/*
3184			 * calling function will clear interrupt.  then
3185			 * the real interrupt will either arrive or the
3186			 * packet timeout handling will take over and
3187			 * reset.
3188			 */
3189			return (NV_FAILURE);
3190		}
3191
3192	} else {
3193		/*
3194		 * NCQ check for BSY here and wait if still bsy before
3195		 * continuing. Rather than wait for it to be cleared
3196		 * when starting a packet and wasting CPU time, the starting
3197		 * thread can exit immediate, but might have to spin here
3198		 * for a bit possibly.  Needs more work and experimentation.
3199		 */
3200		ASSERT(nvp->nvp_ncq_run);
3201	}
3202
3203
3204	if (nvp->nvp_ncq_run) {
3205		ncq_command = B_TRUE;
3206		ASSERT(nvp->nvp_non_ncq_run == 0);
3207	} else {
3208		ASSERT(nvp->nvp_non_ncq_run != 0);
3209	}
3210
3211	/*
3212	 * active_pkt_bit will represent the bitmap of the single completed
3213	 * packet.  Because of the nature of sw assisted NCQ, only one
3214	 * command will complete per interrupt.
3215	 */
3216
3217	if (ncq_command == B_FALSE) {
3218		active_pkt = 0;
3219	} else {
3220		/*
3221		 * NCQ: determine which command just completed, by examining
3222		 * which bit cleared in the register since last written.
3223		 */
3224		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3225
3226		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3227
3228		ASSERT(active_pkt_bit);
3229
3230
3231		/*
3232		 * this failure path needs more work to handle the
3233		 * error condition and recovery.
3234		 */
3235		if (active_pkt_bit == 0) {
3236			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3237
3238			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3239			    "nvp->nvp_sactive %X", sactive,
3240			    nvp->nvp_sactive_cache);
3241
3242			(void) nv_get8(cmdhdl, nvp->nvp_status);
3243
3244			mutex_exit(&nvp->nvp_mutex);
3245
3246			return (NV_FAILURE);
3247		}
3248
3249		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3250		    active_pkt++, active_pkt_bit >>= 1) {
3251		}
3252
3253		/*
3254		 * make sure only one bit is ever turned on
3255		 */
3256		ASSERT(active_pkt_bit == 1);
3257
3258		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3259	}
3260
3261	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3262
3263	spkt = nv_slotp->nvslot_spkt;
3264
3265	ASSERT(spkt != NULL);
3266
3267	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3268
3269	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3270
3271	/*
3272	 * If there is no link cannot be certain about the completion
3273	 * of the packet, so abort it.
3274	 */
3275	if (nv_check_link((&spkt->satapkt_device)->
3276	    satadev_scr.sstatus) == B_FALSE) {
3277		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3278
3279	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3280
3281		nv_complete_io(nvp, spkt, active_pkt);
3282	}
3283
3284	mutex_exit(&nvp->nvp_mutex);
3285
3286	return (NV_SUCCESS);
3287}
3288
3289
3290static void
3291nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3292{
3293
3294	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3295
3296	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3297		nvp->nvp_ncq_run--;
3298	} else {
3299		nvp->nvp_non_ncq_run--;
3300	}
3301
3302	/*
3303	 * mark the packet slot idle so it can be reused.  Do this before
3304	 * calling satapkt_comp so the slot can be reused.
3305	 */
3306	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3307
3308	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3309		/*
3310		 * If this is not timed polled mode cmd, which has an
3311		 * active thread monitoring for completion, then need
3312		 * to signal the sleeping thread that the cmd is complete.
3313		 */
3314		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3315			cv_signal(&nvp->nvp_poll_cv);
3316		}
3317
3318		return;
3319	}
3320
3321	if (spkt->satapkt_comp != NULL) {
3322		mutex_exit(&nvp->nvp_mutex);
3323		(*spkt->satapkt_comp)(spkt);
3324		mutex_enter(&nvp->nvp_mutex);
3325	}
3326}
3327
3328
3329/*
3330 * check whether packet is ncq command or not.  for ncq command,
3331 * start it if there is still room on queue.  for non-ncq command only
3332 * start if no other command is running.
3333 */
3334static int
3335nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3336{
3337	uint8_t cmd, ncq;
3338
3339	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3340
3341	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3342
3343	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3344	    (cmd == SATAC_READ_FPDMA_QUEUED));
3345
3346	if (ncq == B_FALSE) {
3347
3348		if ((nvp->nvp_non_ncq_run == 1) ||
3349		    (nvp->nvp_ncq_run > 0)) {
3350			/*
3351			 * next command is non-ncq which can't run
3352			 * concurrently.  exit and return queue full.
3353			 */
3354			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3355
3356			return (SATA_TRAN_QUEUE_FULL);
3357		}
3358
3359		return (nv_start_common(nvp, spkt));
3360	}
3361
3362	/*
3363	 * ncq == B_TRUE
3364	 */
3365	if (nvp->nvp_non_ncq_run == 1) {
3366		/*
3367		 * cannot start any NCQ commands when there
3368		 * is a non-NCQ command running.
3369		 */
3370		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3371
3372		return (SATA_TRAN_QUEUE_FULL);
3373	}
3374
3375#ifdef NCQ
3376	/*
3377	 * this is not compiled for now as satapkt_device.satadev_qdepth
3378	 * is being pulled out until NCQ support is later addressed
3379	 *
3380	 * nvp_queue_depth is initialized by the first NCQ command
3381	 * received.
3382	 */
3383	if (nvp->nvp_queue_depth == 1) {
3384		nvp->nvp_queue_depth =
3385		    spkt->satapkt_device.satadev_qdepth;
3386
3387		ASSERT(nvp->nvp_queue_depth > 1);
3388
3389		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3390		    "nv_process_queue: nvp_queue_depth set to %d",
3391		    nvp->nvp_queue_depth));
3392	}
3393#endif
3394
3395	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3396		/*
3397		 * max number of NCQ commands already active
3398		 */
3399		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3400
3401		return (SATA_TRAN_QUEUE_FULL);
3402	}
3403
3404	return (nv_start_common(nvp, spkt));
3405}
3406
3407
3408/*
3409 * configure INTx and legacy interrupts
3410 */
3411static int
3412nv_add_legacy_intrs(nv_ctl_t *nvc)
3413{
3414	dev_info_t	*devinfo = nvc->nvc_dip;
3415	int		actual, count = 0;
3416	int		x, y, rc, inum = 0;
3417
3418	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3419
3420	/*
3421	 * get number of interrupts
3422	 */
3423	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3424	if ((rc != DDI_SUCCESS) || (count == 0)) {
3425		NVLOG((NVDBG_INTR, nvc, NULL,
3426		    "ddi_intr_get_nintrs() failed, "
3427		    "rc %d count %d", rc, count));
3428
3429		return (DDI_FAILURE);
3430	}
3431
3432	/*
3433	 * allocate an array of interrupt handles
3434	 */
3435	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3436	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3437
3438	/*
3439	 * call ddi_intr_alloc()
3440	 */
3441	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3442	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3443
3444	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3445		nv_cmn_err(CE_WARN, nvc, NULL,
3446		    "ddi_intr_alloc() failed, rc %d", rc);
3447		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3448
3449		return (DDI_FAILURE);
3450	}
3451
3452	if (actual < count) {
3453		nv_cmn_err(CE_WARN, nvc, NULL,
3454		    "ddi_intr_alloc: requested: %d, received: %d",
3455		    count, actual);
3456
3457		goto failure;
3458	}
3459
3460	nvc->nvc_intr_cnt = actual;
3461
3462	/*
3463	 * get intr priority
3464	 */
3465	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3466	    DDI_SUCCESS) {
3467		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3468
3469		goto failure;
3470	}
3471
3472	/*
3473	 * Test for high level mutex
3474	 */
3475	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3476		nv_cmn_err(CE_WARN, nvc, NULL,
3477		    "nv_add_legacy_intrs: high level intr not supported");
3478
3479		goto failure;
3480	}
3481
3482	for (x = 0; x < actual; x++) {
3483		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3484		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3485			nv_cmn_err(CE_WARN, nvc, NULL,
3486			    "ddi_intr_add_handler() failed");
3487
3488			goto failure;
3489		}
3490	}
3491
3492	/*
3493	 * call ddi_intr_enable() for legacy interrupts
3494	 */
3495	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3496		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3497	}
3498
3499	return (DDI_SUCCESS);
3500
3501	failure:
3502	/*
3503	 * free allocated intr and nvc_htable
3504	 */
3505	for (y = 0; y < actual; y++) {
3506		(void) ddi_intr_free(nvc->nvc_htable[y]);
3507	}
3508
3509	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3510
3511	return (DDI_FAILURE);
3512}
3513
3514#ifdef	NV_MSI_SUPPORTED
3515/*
3516 * configure MSI interrupts
3517 */
3518static int
3519nv_add_msi_intrs(nv_ctl_t *nvc)
3520{
3521	dev_info_t	*devinfo = nvc->nvc_dip;
3522	int		count, avail, actual;
3523	int		x, y, rc, inum = 0;
3524
3525	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3526
3527	/*
3528	 * get number of interrupts
3529	 */
3530	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3531	if ((rc != DDI_SUCCESS) || (count == 0)) {
3532		nv_cmn_err(CE_WARN, nvc, NULL,
3533		    "ddi_intr_get_nintrs() failed, "
3534		    "rc %d count %d", rc, count);
3535
3536		return (DDI_FAILURE);
3537	}
3538
3539	/*
3540	 * get number of available interrupts
3541	 */
3542	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3543	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3544		nv_cmn_err(CE_WARN, nvc, NULL,
3545		    "ddi_intr_get_navail() failed, "
3546		    "rc %d avail %d", rc, avail);
3547
3548		return (DDI_FAILURE);
3549	}
3550
3551	if (avail < count) {
3552		nv_cmn_err(CE_WARN, nvc, NULL,
3553		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3554		    avail, count);
3555	}
3556
3557	/*
3558	 * allocate an array of interrupt handles
3559	 */
3560	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3561	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3562
3563	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3564	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3565
3566	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3567		nv_cmn_err(CE_WARN, nvc, NULL,
3568		    "ddi_intr_alloc() failed, rc %d", rc);
3569		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3570
3571		return (DDI_FAILURE);
3572	}
3573
3574	/*
3575	 * Use interrupt count returned or abort?
3576	 */
3577	if (actual < count) {
3578		NVLOG((NVDBG_INIT, nvc, NULL,
3579		    "Requested: %d, Received: %d", count, actual));
3580	}
3581
3582	nvc->nvc_intr_cnt = actual;
3583
3584	/*
3585	 * get priority for first msi, assume remaining are all the same
3586	 */
3587	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3588	    DDI_SUCCESS) {
3589		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3590
3591		goto failure;
3592	}
3593
3594	/*
3595	 * test for high level mutex
3596	 */
3597	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3598		nv_cmn_err(CE_WARN, nvc, NULL,
3599		    "nv_add_msi_intrs: high level intr not supported");
3600
3601		goto failure;
3602	}
3603
3604	/*
3605	 * Call ddi_intr_add_handler()
3606	 */
3607	for (x = 0; x < actual; x++) {
3608		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3609		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3610			nv_cmn_err(CE_WARN, nvc, NULL,
3611			    "ddi_intr_add_handler() failed");
3612
3613			goto failure;
3614		}
3615	}
3616
3617	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3618
3619	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3620		(void) ddi_intr_block_enable(nvc->nvc_htable,
3621		    nvc->nvc_intr_cnt);
3622	} else {
3623		/*
3624		 * Call ddi_intr_enable() for MSI non block enable
3625		 */
3626		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3627			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3628		}
3629	}
3630
3631	return (DDI_SUCCESS);
3632
3633	failure:
3634	/*
3635	 * free allocated intr and nvc_htable
3636	 */
3637	for (y = 0; y < actual; y++) {
3638		(void) ddi_intr_free(nvc->nvc_htable[y]);
3639	}
3640
3641	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3642
3643	return (DDI_FAILURE);
3644}
3645#endif
3646
3647
3648static void
3649nv_rem_intrs(nv_ctl_t *nvc)
3650{
3651	int x, i;
3652	nv_port_t *nvp;
3653
3654	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3655
3656	/*
3657	 * prevent controller from generating interrupts by
3658	 * masking them out.  This is an extra precaution.
3659	 */
3660	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3661		nvp = (&nvc->nvc_port[i]);
3662		mutex_enter(&nvp->nvp_mutex);
3663		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3664		mutex_exit(&nvp->nvp_mutex);
3665	}
3666
3667	/*
3668	 * disable all interrupts
3669	 */
3670	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3671	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3672		(void) ddi_intr_block_disable(nvc->nvc_htable,
3673		    nvc->nvc_intr_cnt);
3674	} else {
3675		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3676			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3677		}
3678	}
3679
3680	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3681		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3682		(void) ddi_intr_free(nvc->nvc_htable[x]);
3683	}
3684
3685	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3686}
3687
3688
3689/*
3690 * variable argument wrapper for cmn_err.  prefixes the instance and port
3691 * number if possible
3692 */
3693static void
3694nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3695{
3696	char port[NV_STRING_10];
3697	char inst[NV_STRING_10];
3698
3699	mutex_enter(&nv_log_mutex);
3700
3701	if (nvc) {
3702		(void) snprintf(inst, NV_STRING_10, "inst %d",
3703		    ddi_get_instance(nvc->nvc_dip));
3704	} else {
3705		inst[0] = '\0';
3706	}
3707
3708	if (nvp) {
3709		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3710	} else {
3711		port[0] = '\0';
3712	}
3713
3714	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3715	    (inst[0]|port[0] ? ": " :""));
3716
3717	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3718	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3719
3720	/*
3721	 * normally set to log to console but in some debug situations it
3722	 * may be useful to log only to a file.
3723	 */
3724	if (nv_log_to_console) {
3725		if (nv_prom_print) {
3726			prom_printf("%s\n", nv_log_buf);
3727		} else {
3728			cmn_err(ce, "%s", nv_log_buf);
3729		}
3730
3731
3732	} else {
3733		cmn_err(ce, "!%s", nv_log_buf);
3734	}
3735
3736	mutex_exit(&nv_log_mutex);
3737}
3738
3739
3740/*
3741 * wrapper for cmn_err
3742 */
3743static void
3744nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3745{
3746	va_list ap;
3747
3748	va_start(ap, fmt);
3749	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3750	va_end(ap);
3751}
3752
3753
3754#if defined(DEBUG)
3755/*
3756 * prefixes the instance and port number if possible to the debug message
3757 */
3758static void
3759nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3760{
3761	va_list ap;
3762
3763	if ((nv_debug_flags & flag) == 0) {
3764		return;
3765	}
3766
3767	va_start(ap, fmt);
3768	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3769	va_end(ap);
3770
3771	/*
3772	 * useful for some debugging situations
3773	 */
3774	if (nv_log_delay) {
3775		drv_usecwait(nv_log_delay);
3776	}
3777
3778}
3779#endif /* DEBUG */
3780
3781
3782/*
3783 * program registers which are common to all commands
3784 */
3785static void
3786nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3787{
3788	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3789	sata_pkt_t *spkt;
3790	sata_cmd_t *satacmd;
3791	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3792	uint8_t cmd, ncq = B_FALSE;
3793
3794	spkt = nv_slotp->nvslot_spkt;
3795	satacmd = &spkt->satapkt_cmd;
3796	cmd = satacmd->satacmd_cmd_reg;
3797
3798	ASSERT(nvp->nvp_slot);
3799
3800	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3801	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3802		ncq = B_TRUE;
3803	}
3804
3805	/*
3806	 * select the drive
3807	 */
3808	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3809
3810	/*
3811	 * make certain the drive selected
3812	 */
3813	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3814	    NV_SEC2USEC(5), 0) == B_FALSE) {
3815
3816		return;
3817	}
3818
3819	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3820
3821	case ATA_ADDR_LBA:
3822		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3823
3824		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3825		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3826		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3827		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3828
3829		break;
3830
3831	case ATA_ADDR_LBA28:
3832		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3833		    "ATA_ADDR_LBA28 mode"));
3834		/*
3835		 * NCQ only uses 48-bit addressing
3836		 */
3837		ASSERT(ncq != B_TRUE);
3838
3839		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3840		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3841		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3842		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3843
3844		break;
3845
3846	case ATA_ADDR_LBA48:
3847		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3848		    "ATA_ADDR_LBA48 mode"));
3849
3850		/*
3851		 * for NCQ, tag goes into count register and real sector count
3852		 * into features register.  The sata module does the translation
3853		 * in the satacmd.
3854		 */
3855		if (ncq == B_TRUE) {
3856			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3857			nv_put8(cmdhdl, nvp->nvp_feature,
3858			    satacmd->satacmd_features_reg_ext);
3859			nv_put8(cmdhdl, nvp->nvp_feature,
3860			    satacmd->satacmd_features_reg);
3861		} else {
3862			nv_put8(cmdhdl, nvp->nvp_count,
3863			    satacmd->satacmd_sec_count_msb);
3864			nv_put8(cmdhdl, nvp->nvp_count,
3865			    satacmd->satacmd_sec_count_lsb);
3866		}
3867
3868		/*
3869		 * send the high-order half first
3870		 */
3871		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3872		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3873		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3874		/*
3875		 * Send the low-order half
3876		 */
3877		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3878		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3879		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3880
3881		break;
3882
3883	case 0:
3884		/*
3885		 * non-media access commands such as identify and features
3886		 * take this path.
3887		 */
3888		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3889		nv_put8(cmdhdl, nvp->nvp_feature,
3890		    satacmd->satacmd_features_reg);
3891		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3892		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3893		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3894
3895		break;
3896
3897	default:
3898		break;
3899	}
3900
3901	ASSERT(nvp->nvp_slot);
3902}
3903
3904
3905/*
3906 * start a command that involves no media access
3907 */
3908static int
3909nv_start_nodata(nv_port_t *nvp, int slot)
3910{
3911	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3912	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3913	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3914	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3915
3916	nv_program_taskfile_regs(nvp, slot);
3917
3918	/*
3919	 * This next one sets the controller in motion
3920	 */
3921	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3922
3923	return (SATA_TRAN_ACCEPTED);
3924}
3925
3926
3927int
3928nv_bm_status_clear(nv_port_t *nvp)
3929{
3930	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3931	uchar_t	status, ret;
3932
3933	/*
3934	 * Get the current BM status
3935	 */
3936	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3937
3938	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3939
3940	/*
3941	 * Clear the latches (and preserve the other bits)
3942	 */
3943	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3944
3945	return (ret);
3946}
3947
3948
3949/*
3950 * program the bus master DMA engine with the PRD address for
3951 * the active slot command, and start the DMA engine.
3952 */
3953static void
3954nv_start_dma_engine(nv_port_t *nvp, int slot)
3955{
3956	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3957	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3958	uchar_t direction;
3959
3960	ASSERT(nv_slotp->nvslot_spkt != NULL);
3961
3962	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3963	    == SATA_DIR_READ) {
3964		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3965	} else {
3966		direction = BMICX_RWCON_READ_FROM_MEMORY;
3967	}
3968
3969	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3970	    "nv_start_dma_engine entered"));
3971
3972	/*
3973	 * reset the controller's interrupt and error status bits
3974	 */
3975	(void) nv_bm_status_clear(nvp);
3976
3977	/*
3978	 * program the PRD table physical start address
3979	 */
3980	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3981
3982	/*
3983	 * set the direction control and start the DMA controller
3984	 */
3985	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3986}
3987
3988/*
3989 * start dma command, either in or out
3990 */
3991static int
3992nv_start_dma(nv_port_t *nvp, int slot)
3993{
3994	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3995	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3996	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3997	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3998	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3999#ifdef NCQ
4000	uint8_t ncq = B_FALSE;
4001#endif
4002	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4003	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4004	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4005	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4006
4007	ASSERT(sg_count != 0);
4008
4009	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4010		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4011		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4012		    sata_cmdp->satacmd_num_dma_cookies);
4013
4014		return (NV_FAILURE);
4015	}
4016
4017	nv_program_taskfile_regs(nvp, slot);
4018
4019	/*
4020	 * start the drive in motion
4021	 */
4022	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4023
4024	/*
4025	 * the drive starts processing the transaction when the cmd register
4026	 * is written.  This is done here before programming the DMA engine to
4027	 * parallelize and save some time.  In the event that the drive is ready
4028	 * before DMA, it will wait.
4029	 */
4030#ifdef NCQ
4031	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4032	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4033		ncq = B_TRUE;
4034	}
4035#endif
4036
4037	/*
4038	 * copy the PRD list to PRD table in DMA accessible memory
4039	 * so that the controller can access it.
4040	 */
4041	for (idx = 0; idx < sg_count; idx++, srcp++) {
4042		uint32_t size;
4043
4044		nv_put32(sghdl, dstp++, srcp->dmac_address);
4045
4046		/* Set the number of bytes to transfer, 0 implies 64KB */
4047		size = srcp->dmac_size;
4048		if (size == 0x10000)
4049			size = 0;
4050
4051		/*
4052		 * If this is a 40-bit address, copy bits 32-40 of the
4053		 * physical address to bits 16-24 of the PRD count.
4054		 */
4055		if (srcp->dmac_laddress > UINT32_MAX) {
4056			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4057		}
4058
4059		/*
4060		 * set the end of table flag for the last entry
4061		 */
4062		if (idx == (sg_count - 1)) {
4063			size |= PRDE_EOT;
4064		}
4065
4066		nv_put32(sghdl, dstp++, size);
4067	}
4068
4069	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4070	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4071
4072	nv_start_dma_engine(nvp, slot);
4073
4074#ifdef NCQ
4075	/*
4076	 * optimization:  for SWNCQ, start DMA engine if this is the only
4077	 * command running.  Preliminary NCQ efforts indicated this needs
4078	 * more debugging.
4079	 *
4080	 * if (nvp->nvp_ncq_run <= 1)
4081	 */
4082
4083	if (ncq == B_FALSE) {
4084		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4085		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4086		    " cmd = %X", non_ncq_commands++, cmd));
4087		nv_start_dma_engine(nvp, slot);
4088	} else {
4089		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4090		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4091	}
4092#endif /* NCQ */
4093
4094	return (SATA_TRAN_ACCEPTED);
4095}
4096
4097
4098/*
4099 * start a PIO data-in ATA command
4100 */
4101static int
4102nv_start_pio_in(nv_port_t *nvp, int slot)
4103{
4104
4105	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4106	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4107	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4108
4109	nv_program_taskfile_regs(nvp, slot);
4110
4111	/*
4112	 * This next one sets the drive in motion
4113	 */
4114	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4115
4116	return (SATA_TRAN_ACCEPTED);
4117}
4118
4119
4120/*
4121 * start a PIO data-out ATA command
4122 */
4123static int
4124nv_start_pio_out(nv_port_t *nvp, int slot)
4125{
4126	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4127	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4128	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4129
4130	nv_program_taskfile_regs(nvp, slot);
4131
4132	/*
4133	 * this next one sets the drive in motion
4134	 */
4135	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4136
4137	/*
4138	 * wait for the busy bit to settle
4139	 */
4140	NV_DELAY_NSEC(400);
4141
4142	/*
4143	 * wait for the drive to assert DRQ to send the first chunk
4144	 * of data. Have to busy wait because there's no interrupt for
4145	 * the first chunk. This is bad... uses a lot of cycles if the
4146	 * drive responds too slowly or if the wait loop granularity
4147	 * is too large. It's even worse if the drive is defective and
4148	 * the loop times out.
4149	 */
4150	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4151	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4152	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4153	    4000000, 0) == B_FALSE) {
4154		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4155
4156		goto error;
4157	}
4158
4159	/*
4160	 * send the first block.
4161	 */
4162	nv_intr_pio_out(nvp, nv_slotp);
4163
4164	/*
4165	 * If nvslot_flags is not set to COMPLETE yet, then processing
4166	 * is OK so far, so return.  Otherwise, fall into error handling
4167	 * below.
4168	 */
4169	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4170
4171		return (SATA_TRAN_ACCEPTED);
4172	}
4173
4174	error:
4175	/*
4176	 * there was an error so reset the device and complete the packet.
4177	 */
4178	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4179	nv_complete_io(nvp, spkt, 0);
4180	nv_reset(nvp);
4181
4182	return (SATA_TRAN_PORT_ERROR);
4183}
4184
4185
4186/*
4187 * start a ATAPI Packet command (PIO data in or out)
4188 */
4189static int
4190nv_start_pkt_pio(nv_port_t *nvp, int slot)
4191{
4192	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4193	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4194	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4195	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4196
4197	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4198	    "nv_start_pkt_pio: start"));
4199
4200	/*
4201	 * Write the PACKET command to the command register.  Normally
4202	 * this would be done through nv_program_taskfile_regs().  It
4203	 * is done here because some values need to be overridden.
4204	 */
4205
4206	/* select the drive */
4207	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4208
4209	/* make certain the drive selected */
4210	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4211	    NV_SEC2USEC(5), 0) == B_FALSE) {
4212		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4213		    "nv_start_pkt_pio: drive select failed"));
4214		return (SATA_TRAN_PORT_ERROR);
4215	}
4216
4217	/*
4218	 * The command is always sent via PIO, despite whatever the SATA
4219	 * framework sets in the command.  Overwrite the DMA bit to do this.
4220	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4221	 */
4222	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4223
4224	/* set appropriately by the sata framework */
4225	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4226	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4227	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4228	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4229
4230	/* initiate the command by writing the command register last */
4231	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4232
4233	/* Give the host controller time to do its thing */
4234	NV_DELAY_NSEC(400);
4235
4236	/*
4237	 * Wait for the device to indicate that it is ready for the command
4238	 * ATAPI protocol state - HP0: Check_Status_A
4239	 */
4240
4241	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4242	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4243	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4244	    4000000, 0) == B_FALSE) {
4245		/*
4246		 * Either an error or device fault occurred or the wait
4247		 * timed out.  According to the ATAPI protocol, command
4248		 * completion is also possible.  Other implementations of
4249		 * this protocol don't handle this last case, so neither
4250		 * does this code.
4251		 */
4252
4253		if (nv_get8(cmdhdl, nvp->nvp_status) &
4254		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4255			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4256
4257			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4258			    "nv_start_pkt_pio: device error (HP0)"));
4259		} else {
4260			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4261
4262			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4263			    "nv_start_pkt_pio: timeout (HP0)"));
4264		}
4265
4266		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4267		nv_complete_io(nvp, spkt, 0);
4268		nv_reset(nvp);
4269
4270		return (SATA_TRAN_PORT_ERROR);
4271	}
4272
4273	/*
4274	 * Put the ATAPI command in the data register
4275	 * ATAPI protocol state - HP1: Send_Packet
4276	 */
4277
4278	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4279	    (ushort_t *)nvp->nvp_data,
4280	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4281
4282	/*
4283	 * See you in nv_intr_pkt_pio.
4284	 * ATAPI protocol state - HP3: INTRQ_wait
4285	 */
4286
4287	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4288	    "nv_start_pkt_pio: exiting into HP3"));
4289
4290	return (SATA_TRAN_ACCEPTED);
4291}
4292
4293
4294/*
4295 * Interrupt processing for a non-data ATA command.
4296 */
4297static void
4298nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4299{
4300	uchar_t status;
4301	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4302	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4303	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4304	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4305
4306	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4307
4308	status = nv_get8(cmdhdl, nvp->nvp_status);
4309
4310	/*
4311	 * check for errors
4312	 */
4313	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4314		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4315		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4316		    nvp->nvp_altstatus);
4317		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4318	} else {
4319		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4320	}
4321
4322	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4323}
4324
4325
4326/*
4327 * ATA command, PIO data in
4328 */
4329static void
4330nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4331{
4332	uchar_t	status;
4333	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4334	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4335	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4336	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4337	int count;
4338
4339	status = nv_get8(cmdhdl, nvp->nvp_status);
4340
4341	if (status & SATA_STATUS_BSY) {
4342		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4343		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4344		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4345		    nvp->nvp_altstatus);
4346		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4347		nv_reset(nvp);
4348
4349		return;
4350	}
4351
4352	/*
4353	 * check for errors
4354	 */
4355	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4356	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4357		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4358		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4359		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4360
4361		return;
4362	}
4363
4364	/*
4365	 * read the next chunk of data (if any)
4366	 */
4367	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4368
4369	/*
4370	 * read count bytes
4371	 */
4372	ASSERT(count != 0);
4373
4374	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4375	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4376
4377	nv_slotp->nvslot_v_addr += count;
4378	nv_slotp->nvslot_byte_count -= count;
4379
4380
4381	if (nv_slotp->nvslot_byte_count != 0) {
4382		/*
4383		 * more to transfer.  Wait for next interrupt.
4384		 */
4385		return;
4386	}
4387
4388	/*
4389	 * transfer is complete. wait for the busy bit to settle.
4390	 */
4391	NV_DELAY_NSEC(400);
4392
4393	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4394	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4395}
4396
4397
4398/*
4399 * ATA command PIO data out
4400 */
4401static void
4402nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4403{
4404	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4405	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4406	uchar_t status;
4407	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4408	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4409	int count;
4410
4411	/*
4412	 * clear the IRQ
4413	 */
4414	status = nv_get8(cmdhdl, nvp->nvp_status);
4415
4416	if (status & SATA_STATUS_BSY) {
4417		/*
4418		 * this should not happen
4419		 */
4420		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4421		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4422		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4423		    nvp->nvp_altstatus);
4424		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4425
4426		return;
4427	}
4428
4429	/*
4430	 * check for errors
4431	 */
4432	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4433		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4434		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4435		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4436
4437		return;
4438	}
4439
4440	/*
4441	 * this is the condition which signals the drive is
4442	 * no longer ready to transfer.  Likely that the transfer
4443	 * completed successfully, but check that byte_count is
4444	 * zero.
4445	 */
4446	if ((status & SATA_STATUS_DRQ) == 0) {
4447
4448		if (nv_slotp->nvslot_byte_count == 0) {
4449			/*
4450			 * complete; successful transfer
4451			 */
4452			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4453		} else {
4454			/*
4455			 * error condition, incomplete transfer
4456			 */
4457			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4458			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4459		}
4460		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4461
4462		return;
4463	}
4464
4465	/*
4466	 * write the next chunk of data
4467	 */
4468	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4469
4470	/*
4471	 * read or write count bytes
4472	 */
4473
4474	ASSERT(count != 0);
4475
4476	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4477	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4478
4479	nv_slotp->nvslot_v_addr += count;
4480	nv_slotp->nvslot_byte_count -= count;
4481}
4482
4483
4484/*
4485 * ATAPI PACKET command, PIO in/out interrupt
4486 *
4487 * Under normal circumstances, one of four different interrupt scenarios
4488 * will result in this function being called:
4489 *
4490 * 1. Packet command data transfer
4491 * 2. Packet command completion
4492 * 3. Request sense data transfer
4493 * 4. Request sense command completion
4494 */
4495static void
4496nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4497{
4498	uchar_t	status;
4499	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4500	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4501	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4502	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4503	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4504	uint16_t ctlr_count;
4505	int count;
4506
4507	/* ATAPI protocol state - HP2: Check_Status_B */
4508
4509	status = nv_get8(cmdhdl, nvp->nvp_status);
4510	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4511	    "nv_intr_pkt_pio: status 0x%x", status));
4512
4513	if (status & SATA_STATUS_BSY) {
4514		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4515			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4516			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4517		} else {
4518			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4519			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4520
4521			nv_reset(nvp);
4522		}
4523
4524		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4525		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4526
4527		return;
4528	}
4529
4530	if ((status & SATA_STATUS_DF) != 0) {
4531		/*
4532		 * On device fault, just clean up and bail.  Request sense
4533		 * will just default to its NO SENSE initialized value.
4534		 */
4535
4536		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4537			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4538		}
4539
4540		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4541		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4542
4543		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4544		    nvp->nvp_altstatus);
4545		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4546		    nvp->nvp_error);
4547
4548		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4549		    "nv_intr_pkt_pio: device fault"));
4550
4551		return;
4552	}
4553
4554	if ((status & SATA_STATUS_ERR) != 0) {
4555		/*
4556		 * On command error, figure out whether we are processing a
4557		 * request sense.  If so, clean up and bail.  Otherwise,
4558		 * do a REQUEST SENSE.
4559		 */
4560
4561		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4562			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4563			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4564			    NV_FAILURE) {
4565				nv_copy_registers(nvp, &spkt->satapkt_device,
4566				    spkt);
4567				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4568				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4569			}
4570
4571			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4572			    nvp->nvp_altstatus);
4573			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4574			    nvp->nvp_error);
4575		} else {
4576			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4577			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4578
4579			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4580		}
4581
4582		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4583		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4584
4585		return;
4586	}
4587
4588	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4589		/*
4590		 * REQUEST SENSE command processing
4591		 */
4592
4593		if ((status & (SATA_STATUS_DRQ)) != 0) {
4594			/* ATAPI state - HP4: Transfer_Data */
4595
4596			/* read the byte count from the controller */
4597			ctlr_count =
4598			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4599			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4600
4601			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4602			    "nv_intr_pkt_pio: ctlr byte count - %d",
4603			    ctlr_count));
4604
4605			if (ctlr_count == 0) {
4606				/* no data to transfer - some devices do this */
4607
4608				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4609				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4610
4611				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4612				    "nv_intr_pkt_pio: done (no data)"));
4613
4614				return;
4615			}
4616
4617			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4618
4619			/* transfer the data */
4620			ddi_rep_get16(cmdhdl,
4621			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4622			    (ushort_t *)nvp->nvp_data, (count >> 1),
4623			    DDI_DEV_NO_AUTOINCR);
4624
4625			/* consume residual bytes */
4626			ctlr_count -= count;
4627
4628			if (ctlr_count > 0) {
4629				for (; ctlr_count > 0; ctlr_count -= 2)
4630					(void) ddi_get16(cmdhdl,
4631					    (ushort_t *)nvp->nvp_data);
4632			}
4633
4634			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4635			    "nv_intr_pkt_pio: transition to HP2"));
4636		} else {
4637			/* still in ATAPI state - HP2 */
4638
4639			/*
4640			 * In order to avoid clobbering the rqsense data
4641			 * set by the SATA framework, the sense data read
4642			 * from the device is put in a separate buffer and
4643			 * copied into the packet after the request sense
4644			 * command successfully completes.
4645			 */
4646			bcopy(nv_slotp->nvslot_rqsense_buff,
4647			    spkt->satapkt_cmd.satacmd_rqsense,
4648			    SATA_ATAPI_RQSENSE_LEN);
4649
4650			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4651			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4652
4653			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4654			    "nv_intr_pkt_pio: request sense done"));
4655		}
4656
4657		return;
4658	}
4659
4660	/*
4661	 * Normal command processing
4662	 */
4663
4664	if ((status & (SATA_STATUS_DRQ)) != 0) {
4665		/* ATAPI protocol state - HP4: Transfer_Data */
4666
4667		/* read the byte count from the controller */
4668		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4669		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4670
4671		if (ctlr_count == 0) {
4672			/* no data to transfer - some devices do this */
4673
4674			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4675			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4676
4677			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4678			    "nv_intr_pkt_pio: done (no data)"));
4679
4680			return;
4681		}
4682
4683		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4684
4685		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4686		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4687
4688		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4689		    "nv_intr_pkt_pio: byte_count 0x%x",
4690		    nv_slotp->nvslot_byte_count));
4691
4692		/* transfer the data */
4693
4694		if (direction == SATA_DIR_READ) {
4695			ddi_rep_get16(cmdhdl,
4696			    (ushort_t *)nv_slotp->nvslot_v_addr,
4697			    (ushort_t *)nvp->nvp_data, (count >> 1),
4698			    DDI_DEV_NO_AUTOINCR);
4699
4700			ctlr_count -= count;
4701
4702			if (ctlr_count > 0) {
4703				/* consume remainding bytes */
4704
4705				for (; ctlr_count > 0;
4706				    ctlr_count -= 2)
4707					(void) ddi_get16(cmdhdl,
4708					    (ushort_t *)nvp->nvp_data);
4709
4710				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4711				    "nv_intr_pkt_pio: bytes remained"));
4712			}
4713		} else {
4714			ddi_rep_put16(cmdhdl,
4715			    (ushort_t *)nv_slotp->nvslot_v_addr,
4716			    (ushort_t *)nvp->nvp_data, (count >> 1),
4717			    DDI_DEV_NO_AUTOINCR);
4718		}
4719
4720		nv_slotp->nvslot_v_addr += count;
4721		nv_slotp->nvslot_byte_count -= count;
4722
4723		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4724		    "nv_intr_pkt_pio: transition to HP2"));
4725	} else {
4726		/* still in ATAPI state - HP2 */
4727
4728		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4729		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4730
4731		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4732		    "nv_intr_pkt_pio: done"));
4733	}
4734}
4735
4736
4737/*
4738 * ATA command, DMA data in/out
4739 */
4740static void
4741nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4742{
4743	uchar_t status;
4744	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4745	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4746	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4747	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4748	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4749	uchar_t	bmicx;
4750	uchar_t bm_status;
4751
4752	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4753
4754	/*
4755	 * stop DMA engine.
4756	 */
4757	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4758	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4759
4760	/*
4761	 * get the status and clear the IRQ, and check for DMA error
4762	 */
4763	status = nv_get8(cmdhdl, nvp->nvp_status);
4764
4765	/*
4766	 * check for drive errors
4767	 */
4768	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4769		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4770		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4771		(void) nv_bm_status_clear(nvp);
4772
4773		return;
4774	}
4775
4776	bm_status = nv_bm_status_clear(nvp);
4777
4778	/*
4779	 * check for bus master errors
4780	 */
4781	if (bm_status & BMISX_IDERR) {
4782		spkt->satapkt_reason = SATA_PKT_RESET;
4783		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4784		    nvp->nvp_altstatus);
4785		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4786		nv_reset(nvp);
4787
4788		return;
4789	}
4790
4791	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4792}
4793
4794
4795/*
4796 * Wait for a register of a controller to achieve a specific state.
4797 * To return normally, all the bits in the first sub-mask must be ON,
4798 * all the bits in the second sub-mask must be OFF.
4799 * If timeout_usec microseconds pass without the controller achieving
4800 * the desired bit configuration, return TRUE, else FALSE.
4801 *
4802 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4803 * occur for the first 250 us, then switch over to a sleeping wait.
4804 *
4805 */
4806int
4807nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4808    int type_wait)
4809{
4810	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4811	hrtime_t end, cur, start_sleep, start;
4812	int first_time = B_TRUE;
4813	ushort_t val;
4814
4815	for (;;) {
4816		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4817
4818		if ((val & onbits) == onbits && (val & offbits) == 0) {
4819
4820			return (B_TRUE);
4821		}
4822
4823		cur = gethrtime();
4824
4825		/*
4826		 * store the start time and calculate the end
4827		 * time.  also calculate "start_sleep" which is
4828		 * the point after which the driver will stop busy
4829		 * waiting and change to sleep waiting.
4830		 */
4831		if (first_time) {
4832			first_time = B_FALSE;
4833			/*
4834			 * start and end are in nanoseconds
4835			 */
4836			start = cur;
4837			end = start + timeout_usec * 1000;
4838			/*
4839			 * add 1 ms to start
4840			 */
4841			start_sleep =  start + 250000;
4842
4843			if (servicing_interrupt()) {
4844				type_wait = NV_NOSLEEP;
4845			}
4846		}
4847
4848		if (cur > end) {
4849
4850			break;
4851		}
4852
4853		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4854#if ! defined(__lock_lint)
4855			delay(1);
4856#endif
4857		} else {
4858			drv_usecwait(nv_usec_delay);
4859		}
4860	}
4861
4862	return (B_FALSE);
4863}
4864
4865
4866/*
4867 * This is a slightly more complicated version that checks
4868 * for error conditions and bails-out rather than looping
4869 * until the timeout is exceeded.
4870 *
4871 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4872 * occur for the first 250 us, then switch over to a sleeping wait.
4873 */
4874int
4875nv_wait3(
4876	nv_port_t	*nvp,
4877	uchar_t		onbits1,
4878	uchar_t		offbits1,
4879	uchar_t		failure_onbits2,
4880	uchar_t		failure_offbits2,
4881	uchar_t		failure_onbits3,
4882	uchar_t		failure_offbits3,
4883	uint_t		timeout_usec,
4884	int		type_wait)
4885{
4886	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4887	hrtime_t end, cur, start_sleep, start;
4888	int first_time = B_TRUE;
4889	ushort_t val;
4890
4891	for (;;) {
4892		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4893
4894		/*
4895		 * check for expected condition
4896		 */
4897		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4898
4899			return (B_TRUE);
4900		}
4901
4902		/*
4903		 * check for error conditions
4904		 */
4905		if ((val & failure_onbits2) == failure_onbits2 &&
4906		    (val & failure_offbits2) == 0) {
4907
4908			return (B_FALSE);
4909		}
4910
4911		if ((val & failure_onbits3) == failure_onbits3 &&
4912		    (val & failure_offbits3) == 0) {
4913
4914			return (B_FALSE);
4915		}
4916
4917		/*
4918		 * store the start time and calculate the end
4919		 * time.  also calculate "start_sleep" which is
4920		 * the point after which the driver will stop busy
4921		 * waiting and change to sleep waiting.
4922		 */
4923		if (first_time) {
4924			first_time = B_FALSE;
4925			/*
4926			 * start and end are in nanoseconds
4927			 */
4928			cur = start = gethrtime();
4929			end = start + timeout_usec * 1000;
4930			/*
4931			 * add 1 ms to start
4932			 */
4933			start_sleep =  start + 250000;
4934
4935			if (servicing_interrupt()) {
4936				type_wait = NV_NOSLEEP;
4937			}
4938		} else {
4939			cur = gethrtime();
4940		}
4941
4942		if (cur > end) {
4943
4944			break;
4945		}
4946
4947		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4948#if ! defined(__lock_lint)
4949			delay(1);
4950#endif
4951		} else {
4952			drv_usecwait(nv_usec_delay);
4953		}
4954	}
4955
4956	return (B_FALSE);
4957}
4958
4959
4960/*
4961 * nv_check_link() checks if a specified link is active device present
4962 * and communicating.
4963 */
4964static boolean_t
4965nv_check_link(uint32_t sstatus)
4966{
4967	uint8_t det;
4968
4969	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4970
4971	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4972}
4973
4974
4975/*
4976 * nv_port_state_change() reports the state of the port to the
4977 * sata module by calling sata_hba_event_notify().  This
4978 * function is called any time the state of the port is changed
4979 */
4980static void
4981nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4982{
4983	sata_device_t sd;
4984
4985	bzero((void *)&sd, sizeof (sata_device_t));
4986	sd.satadev_rev = SATA_DEVICE_REV;
4987	nv_copy_registers(nvp, &sd, NULL);
4988
4989	/*
4990	 * When NCQ is implemented sactive and snotific field need to be
4991	 * updated.
4992	 */
4993	sd.satadev_addr.cport = nvp->nvp_port_num;
4994	sd.satadev_addr.qual = addr_type;
4995	sd.satadev_state = state;
4996
4997	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4998}
4999
5000
5001/*
5002 * timeout processing:
5003 *
5004 * Check if any packets have crossed a timeout threshold.  If so, then
5005 * abort the packet.  This function is not NCQ aware.
5006 *
5007 * If reset was invoked in any other place than nv_sata_probe(), then
5008 * monitor for reset completion here.
5009 *
5010 */
5011static void
5012nv_timeout(void *arg)
5013{
5014	nv_port_t *nvp = arg;
5015	nv_slot_t *nv_slotp;
5016	int restart_timeout = B_FALSE;
5017
5018	mutex_enter(&nvp->nvp_mutex);
5019
5020	/*
5021	 * If the probe entry point is driving the reset and signature
5022	 * acquisition, just return.
5023	 */
5024	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5025		goto finished;
5026	}
5027
5028	/*
5029	 * If the port is not in the init state, it likely
5030	 * means the link was lost while a timeout was active.
5031	 */
5032	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5033		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5034		    "nv_timeout: port uninitialized"));
5035
5036		goto finished;
5037	}
5038
5039	if (nvp->nvp_state & NV_PORT_RESET) {
5040		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5041		uint32_t sstatus;
5042
5043		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5044		    "nv_timeout(): port waiting for signature"));
5045
5046		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5047
5048		/*
5049		 * check for link presence.  If the link remains
5050		 * missing for more than 2 seconds, send a remove
5051		 * event and abort signature acquisition.
5052		 */
5053		if (nv_check_link(sstatus) == B_FALSE) {
5054			clock_t e_link_lost = ddi_get_lbolt();
5055
5056			if (nvp->nvp_link_lost_time == 0) {
5057				nvp->nvp_link_lost_time = e_link_lost;
5058			}
5059			if (TICK_TO_SEC(e_link_lost -
5060			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5061				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5062				    "probe: intermittent link lost while"
5063				    " resetting"));
5064				restart_timeout = B_TRUE;
5065			} else {
5066				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5067				    "link lost during signature acquisition."
5068				    "  Giving up"));
5069				nv_port_state_change(nvp,
5070				    SATA_EVNT_DEVICE_DETACHED|
5071				    SATA_EVNT_LINK_LOST,
5072				    SATA_ADDR_CPORT, 0);
5073				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5074				nvp->nvp_state &= ~NV_PORT_RESET;
5075			}
5076
5077			goto finished;
5078		} else {
5079
5080			nvp->nvp_link_lost_time = 0;
5081		}
5082
5083		nv_read_signature(nvp);
5084
5085		if (nvp->nvp_signature != 0) {
5086			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5087			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5088				nvp->nvp_state |= NV_PORT_RESTORE;
5089				nv_port_state_change(nvp,
5090				    SATA_EVNT_DEVICE_RESET,
5091				    SATA_ADDR_DCPORT,
5092				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5093			}
5094
5095			goto finished;
5096		}
5097
5098		/*
5099		 * Reset if more than 5 seconds has passed without
5100		 * acquiring a signature.
5101		 */
5102		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5103			nv_reset(nvp);
5104		}
5105
5106		restart_timeout = B_TRUE;
5107		goto finished;
5108	}
5109
5110
5111	/*
5112	 * not yet NCQ aware
5113	 */
5114	nv_slotp = &(nvp->nvp_slot[0]);
5115
5116	/*
5117	 * this happens early on before nv_slotp is set
5118	 * up OR when a device was unexpectedly removed and
5119	 * there was an active packet.
5120	 */
5121	if (nv_slotp == NULL) {
5122		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5123		    "nv_timeout: nv_slotp == NULL"));
5124
5125		goto finished;
5126	}
5127
5128	/*
5129	 * perform timeout checking and processing only if there is an
5130	 * active packet on the port
5131	 */
5132	if (nv_slotp->nvslot_spkt != NULL)  {
5133		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5134		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5135		uint8_t cmd = satacmd->satacmd_cmd_reg;
5136		uint64_t lba;
5137
5138#if ! defined(__lock_lint) && defined(DEBUG)
5139
5140		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5141		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5142		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5143		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5144		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5145		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5146#endif
5147
5148		/*
5149		 * timeout not needed if there is a polling thread
5150		 */
5151		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5152
5153			goto finished;
5154		}
5155
5156		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5157		    spkt->satapkt_time) {
5158			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5159			    "abort timeout: "
5160			    "nvslot_stime: %ld max ticks till timeout: "
5161			    "%ld cur_time: %ld cmd=%x lba=%d",
5162			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5163			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5164
5165			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5166
5167		} else {
5168			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5169			    " still in use so restarting timeout"));
5170		}
5171		restart_timeout = B_TRUE;
5172
5173	} else {
5174		/*
5175		 * there was no active packet, so do not re-enable timeout
5176		 */
5177		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5178		    "nv_timeout: no active packet so not re-arming timeout"));
5179	}
5180
5181	finished:
5182
5183	if (restart_timeout == B_TRUE) {
5184		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5185		    drv_usectohz(NV_ONE_SEC));
5186	} else {
5187		nvp->nvp_timeout_id = 0;
5188	}
5189	mutex_exit(&nvp->nvp_mutex);
5190}
5191
5192
5193/*
5194 * enable or disable the 3 interrupt types the driver is
5195 * interested in: completion, add and remove.
5196 */
5197static void
5198ck804_set_intr(nv_port_t *nvp, int flag)
5199{
5200	nv_ctl_t *nvc = nvp->nvp_ctlp;
5201	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5202	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5203	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5204	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5205	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5206	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5207
5208	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5209		int_en = nv_get8(bar5_hdl,
5210		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5211		int_en &= ~intr_bits[port];
5212		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5213		    int_en);
5214		return;
5215	}
5216
5217	ASSERT(mutex_owned(&nvp->nvp_mutex));
5218
5219	/*
5220	 * controller level lock also required since access to an 8-bit
5221	 * interrupt register is shared between both channels.
5222	 */
5223	mutex_enter(&nvc->nvc_mutex);
5224
5225	if (flag & NV_INTR_CLEAR_ALL) {
5226		NVLOG((NVDBG_INTR, nvc, nvp,
5227		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));
5228
5229		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5230		    (uint8_t *)(nvc->nvc_ck804_int_status));
5231
5232		if (intr_status & clear_all_bits[port]) {
5233
5234			nv_put8(nvc->nvc_bar_hdl[5],
5235			    (uint8_t *)(nvc->nvc_ck804_int_status),
5236			    clear_all_bits[port]);
5237
5238			NVLOG((NVDBG_INTR, nvc, nvp,
5239			    "interrupt bits cleared %x",
5240			    intr_status & clear_all_bits[port]));
5241		}
5242	}
5243
5244	if (flag & NV_INTR_DISABLE) {
5245		NVLOG((NVDBG_INTR, nvc, nvp,
5246		    "ck804_set_intr: NV_INTR_DISABLE"));
5247		int_en = nv_get8(bar5_hdl,
5248		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5249		int_en &= ~intr_bits[port];
5250		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5251		    int_en);
5252	}
5253
5254	if (flag & NV_INTR_ENABLE) {
5255		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
5256		int_en = nv_get8(bar5_hdl,
5257		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5258		int_en |= intr_bits[port];
5259		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5260		    int_en);
5261	}
5262
5263	mutex_exit(&nvc->nvc_mutex);
5264}
5265
5266
5267/*
5268 * enable or disable the 3 interrupts the driver is interested in:
5269 * completion interrupt, hot add, and hot remove interrupt.
5270 */
5271static void
5272mcp5x_set_intr(nv_port_t *nvp, int flag)
5273{
5274	nv_ctl_t *nvc = nvp->nvp_ctlp;
5275	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5276	uint16_t intr_bits =
5277	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5278	uint16_t int_en;
5279
5280	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5281		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5282		int_en &= ~intr_bits;
5283		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5284		return;
5285	}
5286
5287	ASSERT(mutex_owned(&nvp->nvp_mutex));
5288
5289	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5290
5291	if (flag & NV_INTR_CLEAR_ALL) {
5292		NVLOG((NVDBG_INTR, nvc, nvp,
5293		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5294		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5295	}
5296
5297	if (flag & NV_INTR_ENABLE) {
5298		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5299		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5300		int_en |= intr_bits;
5301		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5302	}
5303
5304	if (flag & NV_INTR_DISABLE) {
5305		NVLOG((NVDBG_INTR, nvc, nvp,
5306		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5307		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5308		int_en &= ~intr_bits;
5309		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5310	}
5311}
5312
5313
5314static void
5315nv_resume(nv_port_t *nvp)
5316{
5317	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5318
5319	mutex_enter(&nvp->nvp_mutex);
5320
5321	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5322		mutex_exit(&nvp->nvp_mutex);
5323		return;
5324	}
5325
5326#ifdef SGPIO_SUPPORT
5327	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5328	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5329#endif
5330
5331	/* Enable interrupt */
5332	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5333
5334	/*
5335	 * power may have been removed to the port and the
5336	 * drive, and/or a drive may have been added or removed.
5337	 * Force a reset which will cause a probe and re-establish
5338	 * any state needed on the drive.
5339	 */
5340	nv_reset(nvp);
5341
5342	mutex_exit(&nvp->nvp_mutex);
5343}
5344
5345
5346static void
5347nv_suspend(nv_port_t *nvp)
5348{
5349	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5350
5351	mutex_enter(&nvp->nvp_mutex);
5352
5353#ifdef SGPIO_SUPPORT
5354	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5355	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5356#endif
5357
5358	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5359		mutex_exit(&nvp->nvp_mutex);
5360		return;
5361	}
5362
5363	/*
5364	 * Stop the timeout handler.
5365	 * (It will be restarted in nv_reset() during nv_resume().)
5366	 */
5367	if (nvp->nvp_timeout_id) {
5368		(void) untimeout(nvp->nvp_timeout_id);
5369		nvp->nvp_timeout_id = 0;
5370	}
5371
5372	/* Disable interrupt */
5373	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5374	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5375
5376	mutex_exit(&nvp->nvp_mutex);
5377}
5378
5379
5380static void
5381nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5382{
5383	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5384	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5385	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5386	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5387	uchar_t status;
5388	struct sata_cmd_flags flags;
5389
5390	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5391
5392	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5393	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5394	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5395
5396	if (spkt == NULL) {
5397
5398		return;
5399	}
5400
5401	/*
5402	 * in the error case, implicitly set the return of regs needed
5403	 * for error handling.
5404	 */
5405	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5406	    nvp->nvp_altstatus);
5407
5408	flags = scmd->satacmd_flags;
5409
5410	if (status & SATA_STATUS_ERR) {
5411		flags.sata_copy_out_lba_low_msb = B_TRUE;
5412		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5413		flags.sata_copy_out_lba_high_msb = B_TRUE;
5414		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5415		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5416		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5417		flags.sata_copy_out_error_reg = B_TRUE;
5418		flags.sata_copy_out_sec_count_msb = B_TRUE;
5419		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5420		scmd->satacmd_status_reg = status;
5421	}
5422
5423	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5424
5425		/*
5426		 * set HOB so that high byte will be read
5427		 */
5428		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5429
5430		/*
5431		 * get the requested high bytes
5432		 */
5433		if (flags.sata_copy_out_sec_count_msb) {
5434			scmd->satacmd_sec_count_msb =
5435			    nv_get8(cmdhdl, nvp->nvp_count);
5436		}
5437
5438		if (flags.sata_copy_out_lba_low_msb) {
5439			scmd->satacmd_lba_low_msb =
5440			    nv_get8(cmdhdl, nvp->nvp_sect);
5441		}
5442
5443		if (flags.sata_copy_out_lba_mid_msb) {
5444			scmd->satacmd_lba_mid_msb =
5445			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5446		}
5447
5448		if (flags.sata_copy_out_lba_high_msb) {
5449			scmd->satacmd_lba_high_msb =
5450			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5451		}
5452	}
5453
5454	/*
5455	 * disable HOB so that low byte is read
5456	 */
5457	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5458
5459	/*
5460	 * get the requested low bytes
5461	 */
5462	if (flags.sata_copy_out_sec_count_lsb) {
5463		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5464	}
5465
5466	if (flags.sata_copy_out_lba_low_lsb) {
5467		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5468	}
5469
5470	if (flags.sata_copy_out_lba_mid_lsb) {
5471		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5472	}
5473
5474	if (flags.sata_copy_out_lba_high_lsb) {
5475		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5476	}
5477
5478	/*
5479	 * get the device register if requested
5480	 */
5481	if (flags.sata_copy_out_device_reg) {
5482		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5483	}
5484
5485	/*
5486	 * get the error register if requested
5487	 */
5488	if (flags.sata_copy_out_error_reg) {
5489		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5490	}
5491}
5492
5493
5494/*
5495 * Hot plug and remove interrupts can occur when the device is reset.  Just
5496 * masking the interrupt doesn't always work well because if a
5497 * different interrupt arrives on the other port, the driver can still
5498 * end up checking the state of the other port and discover the hot
5499 * interrupt flag is set even though it was masked.  Checking for recent
5500 * reset activity and then ignoring turns out to be the easiest way.
5501 */
5502static void
5503nv_report_add_remove(nv_port_t *nvp, int flags)
5504{
5505	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5506	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5507	uint32_t sstatus;
5508	int i;
5509
5510	/*
5511	 * If reset within last 1 second ignore.  This should be
5512	 * reworked and improved instead of having this somewhat
5513	 * heavy handed clamping job.
5514	 */
5515	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
5516		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
5517		    "ignoring plug interrupt was %dms ago",
5518		    TICK_TO_MSEC(time_diff)));
5519
5520		return;
5521	}
5522
5523	/*
5524	 * wait up to 1ms for sstatus to settle and reflect the true
5525	 * status of the port.  Failure to do so can create confusion
5526	 * in probe, where the incorrect sstatus value can still
5527	 * persist.
5528	 */
5529	for (i = 0; i < 1000; i++) {
5530		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5531
5532		if ((flags == NV_PORT_HOTREMOVED) &&
5533		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5534		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5535			break;
5536		}
5537
5538		if ((flags != NV_PORT_HOTREMOVED) &&
5539		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5540		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5541			break;
5542		}
5543		drv_usecwait(1);
5544	}
5545
5546	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5547	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5548
5549	if (flags == NV_PORT_HOTREMOVED) {
5550		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5551		    "nv_report_add_remove() hot removed"));
5552		nv_port_state_change(nvp,
5553		    SATA_EVNT_DEVICE_DETACHED,
5554		    SATA_ADDR_CPORT, 0);
5555
5556		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5557	} else {
5558		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5559		    "nv_report_add_remove() hot plugged"));
5560		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5561		    SATA_ADDR_CPORT, 0);
5562	}
5563}
5564
5565/*
5566 * Get request sense data and stuff it the command's sense buffer.
5567 * Start a request sense command in order to get sense data to insert
5568 * in the sata packet's rqsense buffer.  The command completion
5569 * processing is in nv_intr_pkt_pio.
5570 *
5571 * The sata framework provides a function to allocate and set-up a
5572 * request sense packet command. The reasons it is not being used here is:
5573 * a) it cannot be called in an interrupt context and this function is
5574 *    called in an interrupt context.
5575 * b) it allocates DMA resources that are not used here because this is
5576 *    implemented using PIO.
5577 *
5578 * If, in the future, this is changed to use DMA, the sata framework should
5579 * be used to allocate and set-up the error retrieval (request sense)
5580 * command.
5581 */
5582static int
5583nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5584{
5585	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5586	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5587	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5588	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5589
5590	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5591	    "nv_start_rqsense_pio: start"));
5592
5593	/* clear the local request sense buffer before starting the command */
5594	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5595
5596	/* Write the request sense PACKET command */
5597
5598	/* select the drive */
5599	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5600
5601	/* make certain the drive selected */
5602	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5603	    NV_SEC2USEC(5), 0) == B_FALSE) {
5604		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5605		    "nv_start_rqsense_pio: drive select failed"));
5606		return (NV_FAILURE);
5607	}
5608
5609	/* set up the command */
5610	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
5611	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5612	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5613	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5614	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5615
5616	/* initiate the command by writing the command register last */
5617	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5618
5619	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5620	NV_DELAY_NSEC(400);
5621
5622	/*
5623	 * Wait for the device to indicate that it is ready for the command
5624	 * ATAPI protocol state - HP0: Check_Status_A
5625	 */
5626
5627	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5628	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5629	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5630	    4000000, 0) == B_FALSE) {
5631		if (nv_get8(cmdhdl, nvp->nvp_status) &
5632		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5633			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5634			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5635		} else {
5636			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5637			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5638		}
5639
5640		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5641		nv_complete_io(nvp, spkt, 0);
5642		nv_reset(nvp);
5643
5644		return (NV_FAILURE);
5645	}
5646
5647	/*
5648	 * Put the ATAPI command in the data register
5649	 * ATAPI protocol state - HP1: Send_Packet
5650	 */
5651
5652	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5653	    (ushort_t *)nvp->nvp_data,
5654	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5655
5656	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5657	    "nv_start_rqsense_pio: exiting into HP3"));
5658
5659	return (NV_SUCCESS);
5660}
5661
5662/*
5663 * quiesce(9E) entry point.
5664 *
5665 * This function is called when the system is single-threaded at high
5666 * PIL with preemption disabled. Therefore, this function must not be
5667 * blocked.
5668 *
5669 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5670 * DDI_FAILURE indicates an error condition and should almost never happen.
5671 */
5672static int
5673nv_quiesce(dev_info_t *dip)
5674{
5675	int port, instance = ddi_get_instance(dip);
5676	nv_ctl_t *nvc;
5677
5678	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
5679		return (DDI_FAILURE);
5680
5681	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
5682		nv_port_t *nvp = &(nvc->nvc_port[port]);
5683		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5684		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5685		uint32_t sctrl;
5686
5687		/*
5688		 * Stop the controllers from generating interrupts.
5689		 */
5690		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
5691
5692		/*
5693		 * clear signature registers
5694		 */
5695		nv_put8(cmdhdl, nvp->nvp_sect, 0);
5696		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
5697		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
5698		nv_put8(cmdhdl, nvp->nvp_count, 0);
5699
5700		nvp->nvp_signature = 0;
5701		nvp->nvp_type = 0;
5702		nvp->nvp_state |= NV_PORT_RESET;
5703		nvp->nvp_reset_time = ddi_get_lbolt();
5704		nvp->nvp_link_lost_time = 0;
5705
5706		/*
5707		 * assert reset in PHY by writing a 1 to bit 0 scontrol
5708		 */
5709		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5710
5711		nv_put32(bar5_hdl, nvp->nvp_sctrl,
5712		    sctrl | SCONTROL_DET_COMRESET);
5713
5714		/*
5715		 * wait 1ms
5716		 */
5717		drv_usecwait(1000);
5718
5719		/*
5720		 * de-assert reset in PHY
5721		 */
5722		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
5723	}
5724
5725	return (DDI_SUCCESS);
5726}
5727
5728
5729#ifdef SGPIO_SUPPORT
5730/*
5731 * NVIDIA specific SGPIO LED support
5732 * Please refer to the NVIDIA documentation for additional details
5733 */
5734
5735/*
5736 * nv_sgp_led_init
5737 * Detect SGPIO support.  If present, initialize.
5738 */
5739static void
5740nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5741{
5742	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5743	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5744	nv_sgp_cmn_t *cmn;	/* shared data structure */
5745	char tqname[SGPIO_TQ_NAME_LEN];
5746	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5747
5748	/*
5749	 * Initialize with appropriately invalid values in case this function
5750	 * exits without initializing SGPIO (for example, there is no SGPIO
5751	 * support).
5752	 */
5753	nvc->nvc_sgp_csr = 0;
5754	nvc->nvc_sgp_cbp = NULL;
5755
5756	/*
5757	 * Only try to initialize SGPIO LED support if this property
5758	 * indicates it should be.
5759	 */
5760	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
5761	    "enable-sgpio-leds", 0) != 1)
5762		return;
5763
5764	/*
5765	 * CK804 can pass the sgpio_detect test even though it does not support
5766	 * SGPIO, so don't even look at a CK804.
5767	 */
5768	if (nvc->nvc_mcp5x_flag != B_TRUE)
5769		return;
5770
5771	/*
5772	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5773	 * However, the current implementation only supports 4 drives.
5774	 * With two drives per controller, that means only look at the
5775	 * first two controllers.
5776	 */
5777	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5778		return;
5779
5780	/* confirm that the SGPIO registers are there */
5781	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5782		NVLOG((NVDBG_INIT, nvc, NULL,
5783		    "SGPIO registers not detected"));
5784		return;
5785	}
5786
5787	/* save off the SGPIO_CSR I/O address */
5788	nvc->nvc_sgp_csr = csrp;
5789
5790	/* map in Command Block */
5791	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5792	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5793
5794	/* initialize the SGPIO h/w */
5795	if (nv_sgp_init(nvc) == NV_FAILURE) {
5796		nv_cmn_err(CE_WARN, nvc, NULL,
5797		    "!Unable to initialize SGPIO");
5798	}
5799
5800	if (nvc->nvc_ctlr_num == 0) {
5801		/*
5802		 * Controller 0 on the MCP5X/IO55 initialized the SGPIO
5803		 * and the data that is shared between the controllers.
5804		 * The clever thing to do would be to let the first controller
5805		 * that comes up be the one that initializes all this.
5806		 * However, SGPIO state is not necessarily zeroed between
5807		 * between OS reboots, so there might be old data there.
5808		 */
5809
5810		/* allocate shared space */
5811		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5812		    KM_SLEEP);
5813		if (cmn == NULL) {
5814			nv_cmn_err(CE_WARN, nvc, NULL,
5815			    "!Failed to allocate shared data");
5816			return;
5817		}
5818
5819		nvc->nvc_sgp_cmn = cmn;
5820
5821		/* initialize the shared data structure */
5822		cmn->nvs_magic = SGPIO_MAGIC;
5823		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5824		cmn->nvs_connected = 0;
5825		cmn->nvs_activity = 0;
5826
5827		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5828		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5829		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5830
5831		/* put the address in the SGPIO scratch register */
5832#if defined(__amd64)
5833		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5834#else
5835		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5836#endif
5837
5838		/* start the activity LED taskq */
5839
5840		/*
5841		 * The taskq name should be unique and the time
5842		 */
5843		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5844		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5845		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5846		    TASKQ_DEFAULTPRI, 0);
5847		if (cmn->nvs_taskq == NULL) {
5848			cmn->nvs_taskq_delay = 0;
5849			nv_cmn_err(CE_WARN, nvc, NULL,
5850			    "!Failed to start activity LED taskq");
5851		} else {
5852			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5853			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5854			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5855		}
5856
5857	} else if (nvc->nvc_ctlr_num == 1) {
5858		/*
5859		 * Controller 1 confirms that SGPIO has been initialized
5860		 * and, if so, try to get the shared data pointer, otherwise
5861		 * get the shared data pointer when accessing the data.
5862		 */
5863
5864		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5865			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5866
5867			/*
5868			 * It looks like a pointer, but is it the shared data?
5869			 */
5870			if (cmn->nvs_magic == SGPIO_MAGIC) {
5871				nvc->nvc_sgp_cmn = cmn;
5872
5873				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5874			}
5875		}
5876	}
5877}
5878
5879/*
5880 * nv_sgp_detect
5881 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5882 * report back whether both were readable.
5883 */
5884static int
5885nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5886    uint32_t *cbpp)
5887{
5888	/* get the SGPIO_CSRP */
5889	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5890	if (*csrpp == 0) {
5891		return (NV_FAILURE);
5892	}
5893
5894	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5895	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5896	if (*cbpp == 0) {
5897		return (NV_FAILURE);
5898	}
5899
5900	/* SGPIO_CBP is good, so we must support SGPIO */
5901	return (NV_SUCCESS);
5902}
5903
5904/*
5905 * nv_sgp_init
5906 * Initialize SGPIO.  The process is specified by NVIDIA.
5907 */
5908static int
5909nv_sgp_init(nv_ctl_t *nvc)
5910{
5911	uint32_t status;
5912	int drive_count;
5913
5914	/*
5915	 * if SGPIO status set to SGPIO_STATE_RESET, logic has been
5916	 * reset and needs to be initialized.
5917	 */
5918	status = nv_sgp_csr_read(nvc);
5919	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5920		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5921			/* reset and try again */
5922			nv_sgp_reset(nvc);
5923			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5924				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5925				    "SGPIO init failed"));
5926				return (NV_FAILURE);
5927			}
5928		}
5929	}
5930
5931	/*
5932	 * NVIDIA recommends reading the supported drive count even
5933	 * though they also indicate that it is 4 at this time.
5934	 */
5935	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5936	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5937		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5938		    "SGPIO reported undocumented drive count - %d",
5939		    drive_count));
5940	}
5941
5942	NVLOG((NVDBG_INIT, nvc, NULL,
5943	    "initialized ctlr: %d csr: 0x%08x",
5944	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5945
5946	return (NV_SUCCESS);
5947}
5948
5949static void
5950nv_sgp_reset(nv_ctl_t *nvc)
5951{
5952	uint32_t cmd;
5953	uint32_t status;
5954
5955	cmd = SGPIO_CMD_RESET;
5956	nv_sgp_csr_write(nvc, cmd);
5957
5958	status = nv_sgp_csr_read(nvc);
5959
5960	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5961		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5962		    "SGPIO reset failed: CSR - 0x%x", status));
5963	}
5964}
5965
5966static int
5967nv_sgp_init_cmd(nv_ctl_t *nvc)
5968{
5969	int seq;
5970	hrtime_t start, end;
5971	uint32_t status;
5972	uint32_t cmd;
5973
5974	/* get the old sequence value */
5975	status = nv_sgp_csr_read(nvc);
5976	seq = SGPIO_CSR_SEQ(status);
5977
5978	/* check the state since we have the info anyway */
5979	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5980		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5981		    "SGPIO init_cmd: state not operational"));
5982	}
5983
5984	/* issue command */
5985	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5986	nv_sgp_csr_write(nvc, cmd);
5987
5988	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5989
5990	/* poll for completion */
5991	start = gethrtime();
5992	end = start + NV_SGP_CMD_TIMEOUT;
5993	for (;;) {
5994		status = nv_sgp_csr_read(nvc);
5995
5996		/* break on error */
5997		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5998			break;
5999
6000		/* break on command completion (seq changed) */
6001		if (SGPIO_CSR_SEQ(status) != seq) {
6002			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
6003				NVLOG((NVDBG_ALWAYS, nvc, NULL,
6004				    "Seq changed but command still active"));
6005			}
6006
6007			break;
6008		}
6009
6010		/* Wait 400 ns and try again */
6011		NV_DELAY_NSEC(400);
6012
6013		if (gethrtime() > end)
6014			break;
6015	}
6016
6017	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6018		return (NV_SUCCESS);
6019
6020	return (NV_FAILURE);
6021}
6022
6023static int
6024nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6025{
6026	nv_sgp_cmn_t *cmn;
6027
6028	if (nvc->nvc_sgp_cbp == NULL)
6029		return (NV_FAILURE);
6030
6031	/* check to see if Scratch Register is set */
6032	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
6033		nvc->nvc_sgp_cmn =
6034		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
6035
6036		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
6037			return (NV_FAILURE);
6038
6039		cmn = nvc->nvc_sgp_cmn;
6040
6041		mutex_enter(&cmn->nvs_slock);
6042		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6043		mutex_exit(&cmn->nvs_slock);
6044
6045		return (NV_SUCCESS);
6046	}
6047
6048	return (NV_FAILURE);
6049}
6050
6051/*
6052 * nv_sgp_csr_read
6053 * This is just a 32-bit port read from the value that was obtained from the
6054 * PCI config space.
6055 *
6056 * XXX It was advised to use the in[bwl] function for this, even though they
6057 * are obsolete interfaces.
6058 */
6059static int
6060nv_sgp_csr_read(nv_ctl_t *nvc)
6061{
6062	return (inl(nvc->nvc_sgp_csr));
6063}
6064
6065/*
6066 * nv_sgp_csr_write
6067 * This is just a 32-bit I/O port write.  The port number was obtained from
6068 * the PCI config space.
6069 *
6070 * XXX It was advised to use the out[bwl] function for this, even though they
6071 * are obsolete interfaces.
6072 */
6073static void
6074nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6075{
6076	outl(nvc->nvc_sgp_csr, val);
6077}
6078
6079/*
6080 * nv_sgp_write_data
6081 * Cause SGPIO to send Command Block data
6082 */
6083static int
6084nv_sgp_write_data(nv_ctl_t *nvc)
6085{
6086	hrtime_t start, end;
6087	uint32_t status;
6088	uint32_t cmd;
6089
6090	/* issue command */
6091	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6092	nv_sgp_csr_write(nvc, cmd);
6093
6094	/* poll for completion */
6095	start = gethrtime();
6096	end = start + NV_SGP_CMD_TIMEOUT;
6097	for (;;) {
6098		status = nv_sgp_csr_read(nvc);
6099
6100		/* break on error completion */
6101		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6102			break;
6103
6104		/* break on successful completion */
6105		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6106			break;
6107
6108		/* Wait 400 ns and try again */
6109		NV_DELAY_NSEC(400);
6110
6111		if (gethrtime() > end)
6112			break;
6113	}
6114
6115	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6116		return (NV_SUCCESS);
6117
6118	return (NV_FAILURE);
6119}
6120
6121/*
6122 * nv_sgp_activity_led_ctl
6123 * This is run as a taskq.  It wakes up at a fixed interval and checks to
6124 * see if any of the activity LEDs need to be changed.
6125 */
6126static void
6127nv_sgp_activity_led_ctl(void *arg)
6128{
6129	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6130	nv_sgp_cmn_t *cmn;
6131	volatile nv_sgp_cb_t *cbp;
6132	clock_t ticks;
6133	uint8_t drv_leds;
6134	uint32_t old_leds;
6135	uint32_t new_led_state;
6136	int i;
6137
6138	cmn = nvc->nvc_sgp_cmn;
6139	cbp = nvc->nvc_sgp_cbp;
6140
6141	do {
6142		/* save off the old state of all of the LEDs */
6143		old_leds = cbp->sgpio0_tr;
6144
6145		DTRACE_PROBE3(sgpio__activity__state,
6146		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6147		    int, old_leds);
6148
6149		new_led_state = 0;
6150
6151		/* for each drive */
6152		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6153
6154			/* get the current state of the LEDs for the drive */
6155			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6156
6157			if ((cmn->nvs_connected & (1 << i)) == 0) {
6158				/* if not connected, turn off activity */
6159				drv_leds &= ~TR_ACTIVE_MASK;
6160				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6161
6162				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6163				new_led_state |=
6164				    SGPIO0_TR_DRV_SET(drv_leds, i);
6165
6166				continue;
6167			}
6168
6169			if ((cmn->nvs_activity & (1 << i)) == 0) {
6170				/* connected, but not active */
6171				drv_leds &= ~TR_ACTIVE_MASK;
6172				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6173
6174				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6175				new_led_state |=
6176				    SGPIO0_TR_DRV_SET(drv_leds, i);
6177
6178				continue;
6179			}
6180
6181			/* connected and active */
6182			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6183				/* was enabled, so disable */
6184				drv_leds &= ~TR_ACTIVE_MASK;
6185				drv_leds |=
6186				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6187
6188				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6189				new_led_state |=
6190				    SGPIO0_TR_DRV_SET(drv_leds, i);
6191			} else {
6192				/* was disabled, so enable */
6193				drv_leds &= ~TR_ACTIVE_MASK;
6194				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6195
6196				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6197				new_led_state |=
6198				    SGPIO0_TR_DRV_SET(drv_leds, i);
6199			}
6200
6201			/*
6202			 * clear the activity bit
6203			 * if there is drive activity again within the
6204			 * loop interval (now 1/16 second), nvs_activity
6205			 * will be reset and the "connected and active"
6206			 * condition above will cause the LED to blink
6207			 * off and on at the loop interval rate.  The
6208			 * rate may be increased (interval shortened) as
6209			 * long as it is not more than 1/30 second.
6210			 */
6211			mutex_enter(&cmn->nvs_slock);
6212			cmn->nvs_activity &= ~(1 << i);
6213			mutex_exit(&cmn->nvs_slock);
6214		}
6215
6216		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6217
6218		/* write out LED values */
6219
6220		mutex_enter(&cmn->nvs_slock);
6221		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6222		cbp->sgpio0_tr |= new_led_state;
6223		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6224		mutex_exit(&cmn->nvs_slock);
6225
6226		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6227			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6228			    "nv_sgp_write_data failure updating active LED"));
6229		}
6230
6231		/* now rest for the interval */
6232		mutex_enter(&cmn->nvs_tlock);
6233		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6234		if (ticks > 0)
6235			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6236			    ddi_get_lbolt() + ticks);
6237		mutex_exit(&cmn->nvs_tlock);
6238	} while (ticks > 0);
6239}
6240
6241/*
6242 * nv_sgp_drive_connect
6243 * Set the flag used to indicate that the drive is attached to the HBA.
6244 * Used to let the taskq know that it should turn the Activity LED on.
6245 */
6246static void
6247nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6248{
6249	nv_sgp_cmn_t *cmn;
6250
6251	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6252		return;
6253	cmn = nvc->nvc_sgp_cmn;
6254
6255	mutex_enter(&cmn->nvs_slock);
6256	cmn->nvs_connected |= (1 << drive);
6257	mutex_exit(&cmn->nvs_slock);
6258}
6259
6260/*
6261 * nv_sgp_drive_disconnect
6262 * Clears the flag used to indicate that the drive is no longer attached
6263 * to the HBA.  Used to let the taskq know that it should turn the
6264 * Activity LED off.  The flag that indicates that the drive is in use is
6265 * also cleared.
6266 */
6267static void
6268nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6269{
6270	nv_sgp_cmn_t *cmn;
6271
6272	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6273		return;
6274	cmn = nvc->nvc_sgp_cmn;
6275
6276	mutex_enter(&cmn->nvs_slock);
6277	cmn->nvs_connected &= ~(1 << drive);
6278	cmn->nvs_activity &= ~(1 << drive);
6279	mutex_exit(&cmn->nvs_slock);
6280}
6281
6282/*
6283 * nv_sgp_drive_active
6284 * Sets the flag used to indicate that the drive has been accessed and the
6285 * LED should be flicked off, then on.  It is cleared at a fixed time
6286 * interval by the LED taskq and set by the sata command start.
6287 */
6288static void
6289nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6290{
6291	nv_sgp_cmn_t *cmn;
6292
6293	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6294		return;
6295	cmn = nvc->nvc_sgp_cmn;
6296
6297	DTRACE_PROBE1(sgpio__active, int, drive);
6298
6299	mutex_enter(&cmn->nvs_slock);
6300	cmn->nvs_connected |= (1 << drive);
6301	cmn->nvs_activity |= (1 << drive);
6302	mutex_exit(&cmn->nvs_slock);
6303}
6304
6305
6306/*
6307 * nv_sgp_locate
6308 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6309 * maintained in the SGPIO Command Block.
6310 */
6311static void
6312nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6313{
6314	uint8_t leds;
6315	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6316	nv_sgp_cmn_t *cmn;
6317
6318	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6319		return;
6320	cmn = nvc->nvc_sgp_cmn;
6321
6322	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6323		return;
6324
6325	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6326
6327	mutex_enter(&cmn->nvs_slock);
6328
6329	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6330
6331	leds &= ~TR_LOCATE_MASK;
6332	leds |= TR_LOCATE_SET(value);
6333
6334	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6335	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6336
6337	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6338
6339	mutex_exit(&cmn->nvs_slock);
6340
6341	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6342		nv_cmn_err(CE_WARN, nvc, NULL,
6343		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6344	}
6345}
6346
6347/*
6348 * nv_sgp_error
6349 * Turns the Error/Failure LED off or on for a particular drive.  State is
6350 * maintained in the SGPIO Command Block.
6351 */
6352static void
6353nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6354{
6355	uint8_t leds;
6356	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6357	nv_sgp_cmn_t *cmn;
6358
6359	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6360		return;
6361	cmn = nvc->nvc_sgp_cmn;
6362
6363	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6364		return;
6365
6366	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6367
6368	mutex_enter(&cmn->nvs_slock);
6369
6370	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6371
6372	leds &= ~TR_ERROR_MASK;
6373	leds |= TR_ERROR_SET(value);
6374
6375	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6376	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6377
6378	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6379
6380	mutex_exit(&cmn->nvs_slock);
6381
6382	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6383		nv_cmn_err(CE_WARN, nvc, NULL,
6384		    "!nv_sgp_write_data failure updating Fail/Error LED");
6385	}
6386}
6387
6388static void
6389nv_sgp_cleanup(nv_ctl_t *nvc)
6390{
6391	int drive;
6392	uint8_t drv_leds;
6393	uint32_t led_state;
6394	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6395	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6396	extern void psm_unmap_phys(caddr_t, size_t);
6397
6398	/*
6399	 * If the SGPIO command block isn't mapped or the shared data
6400	 * structure isn't present in this instance, there isn't much that
6401	 * can be cleaned up.
6402	 */
6403	if ((cb == NULL) || (cmn == NULL))
6404		return;
6405
6406	/* turn off activity LEDs for this controller */
6407	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6408
6409	/* get the existing LED state */
6410	led_state = cb->sgpio0_tr;
6411
6412	/* turn off port 0 */
6413	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6414	led_state &= SGPIO0_TR_DRV_CLR(drive);
6415	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6416
6417	/* turn off port 1 */
6418	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6419	led_state &= SGPIO0_TR_DRV_CLR(drive);
6420	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6421
6422	/* set the new led state, which should turn off this ctrl's LEDs */
6423	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6424	(void) nv_sgp_write_data(nvc);
6425
6426	/* clear the controller's in use bit */
6427	mutex_enter(&cmn->nvs_slock);
6428	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6429	mutex_exit(&cmn->nvs_slock);
6430
6431	if (cmn->nvs_in_use == 0) {
6432		/* if all "in use" bits cleared, take everything down */
6433
6434		if (cmn->nvs_taskq != NULL) {
6435			/* allow activity taskq to exit */
6436			cmn->nvs_taskq_delay = 0;
6437			cv_broadcast(&cmn->nvs_cv);
6438
6439			/* then destroy it */
6440			ddi_taskq_destroy(cmn->nvs_taskq);
6441		}
6442
6443		/* turn off all of the LEDs */
6444		cb->sgpio0_tr = 0;
6445		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6446		(void) nv_sgp_write_data(nvc);
6447
6448		cb->sgpio_sr = NULL;
6449
6450		/* free resources */
6451		cv_destroy(&cmn->nvs_cv);
6452		mutex_destroy(&cmn->nvs_tlock);
6453		mutex_destroy(&cmn->nvs_slock);
6454
6455		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6456	}
6457
6458	nvc->nvc_sgp_cmn = NULL;
6459
6460	/* unmap the SGPIO Command Block */
6461	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6462}
6463#endif	/* SGPIO_SUPPORT */
6464