nv_sata.c revision 9280:7e4c63247060
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30 * based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
36 * disabled and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device.  mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible.  Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
48 * be supported, but changes to these states are apparently ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner than later,
51 * that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required.  However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, power
56 * can be removed from the device, and the driver cannot rely on BIOS to
57 * have reset any state.  For the time being, there are no known
58 * non-default configurations that need to be programmed.  This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team.  If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66#include <sys/scsi/scsi.h>
67#include <sys/pci.h>
68#include <sys/byteorder.h>
69#include <sys/sunddi.h>
70#include <sys/sata/sata_hba.h>
71#ifdef SGPIO_SUPPORT
72#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73#include <sys/devctl.h>
74#include <sys/sdt.h>
75#endif
76#include <sys/sata/adapters/nv_sata/nv_sata.h>
77#include <sys/disp.h>
78#include <sys/note.h>
79#include <sys/promif.h>
80
81
82/*
83 * Function prototypes for driver entry points
84 */
85static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87static int nv_quiesce(dev_info_t *dip);
88static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89    void *arg, void **result);
90
91/*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102/*
103 * Local function prototypes
104 */
105static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108#ifdef NV_MSI_SUPPORTED
109static int nv_add_msi_intrs(nv_ctl_t *nvc);
110#endif
111static void nv_rem_intrs(nv_ctl_t *nvc);
112static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113static int nv_start_nodata(nv_port_t *nvp, int slot);
114static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115static int nv_start_pio_in(nv_port_t *nvp, int slot);
116static int nv_start_pio_out(nv_port_t *nvp, int slot);
117static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121static int nv_start_dma(nv_port_t *nvp, int slot);
122static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124static void nv_uninit_ctl(nv_ctl_t *nvc);
125static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127static void nv_uninit_port(nv_port_t *nvp);
128static int nv_init_port(nv_port_t *nvp);
129static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131#ifdef NCQ
132static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133#endif
134static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136    int state);
137static boolean_t nv_check_link(uint32_t sstatus);
138static void nv_common_reg_init(nv_ctl_t *nvc);
139static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
140static void nv_reset(nv_port_t *nvp);
141static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
142static void nv_timeout(void *);
143static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
144static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
145static void nv_read_signature(nv_port_t *nvp);
146static void mcp5x_set_intr(nv_port_t *nvp, int flag);
147static void ck804_set_intr(nv_port_t *nvp, int flag);
148static void nv_resume(nv_port_t *nvp);
149static void nv_suspend(nv_port_t *nvp);
150static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
151static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
152static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153    sata_pkt_t *spkt);
154static void nv_report_add_remove(nv_port_t *nvp, int flags);
155static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157    uchar_t failure_onbits2, uchar_t failure_offbits2,
158    uchar_t failure_onbits3, uchar_t failure_offbits3,
159    uint_t timeout_usec, int type_wait);
160static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161    uint_t timeout_usec, int type_wait);
162static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163
164#ifdef SGPIO_SUPPORT
165static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
166static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
167static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
168    cred_t *credp, int *rvalp);
169
170static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
171static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
172    uint32_t *cbpp);
173static int nv_sgp_init(nv_ctl_t *nvc);
174static void nv_sgp_reset(nv_ctl_t *nvc);
175static int nv_sgp_init_cmd(nv_ctl_t *nvc);
176static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
177static int nv_sgp_csr_read(nv_ctl_t *nvc);
178static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
179static int nv_sgp_write_data(nv_ctl_t *nvc);
180static void nv_sgp_activity_led_ctl(void *arg);
181static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
182static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
183static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
184static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
185static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
186static void nv_sgp_cleanup(nv_ctl_t *nvc);
187#endif
188
189
190/*
191 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
192 * Verify whether it is needed if this driver is ported to another ISA.
193 */
194static ddi_dma_attr_t buffer_dma_attr = {
195	DMA_ATTR_V0,		/* dma_attr_version */
196	0,			/* dma_attr_addr_lo: lowest bus address */
197	0xffffffffull,		/* dma_attr_addr_hi: */
198	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
199	4,			/* dma_attr_align */
200	1,			/* dma_attr_burstsizes. */
201	1,			/* dma_attr_minxfer */
202	0xffffffffull,		/* dma_attr_max xfer including all cookies */
203	0xffffffffull,		/* dma_attr_seg */
204	NV_DMA_NSEGS,		/* dma_attr_sgllen */
205	512,			/* dma_attr_granular */
206	0,			/* dma_attr_flags */
207};
208
209
210/*
211 * DMA attributes for PRD tables
212 */
213ddi_dma_attr_t nv_prd_dma_attr = {
214	DMA_ATTR_V0,		/* dma_attr_version */
215	0,			/* dma_attr_addr_lo */
216	0xffffffffull,		/* dma_attr_addr_hi */
217	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
218	4,			/* dma_attr_align */
219	1,			/* dma_attr_burstsizes */
220	1,			/* dma_attr_minxfer */
221	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
222	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
223	1,			/* dma_attr_sgllen */
224	1,			/* dma_attr_granular */
225	0			/* dma_attr_flags */
226};
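
/*
 * Note on the two attribute structures above (descriptive only):
 * dma_attr_count_max is NV_BM_64K_BOUNDARY - 1 in both, limiting any
 * single DMA cookie to 64K, which matches the usual bus-master rule that
 * one PRD entry cannot describe more than 64K of data.  The data buffer
 * attributes allow up to NV_DMA_NSEGS cookies per transfer, while the
 * PRD table itself is bound as a single cookie (dma_attr_sgllen of 1)
 * that may not cross a 64K boundary (dma_attr_seg of
 * NV_BM_64K_BOUNDARY - 1).
 */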
227
228/*
229 * Device access attributes
230 */
231static ddi_device_acc_attr_t accattr = {
232    DDI_DEVICE_ATTR_V0,
233    DDI_STRUCTURE_LE_ACC,
234    DDI_STRICTORDER_ACC
235};
236
237
238#ifdef SGPIO_SUPPORT
239static struct cb_ops nv_cb_ops = {
240	nv_open,		/* open */
241	nv_close,		/* close */
242	nodev,			/* strategy (block) */
243	nodev,			/* print (block) */
244	nodev,			/* dump (block) */
245	nodev,			/* read */
246	nodev,			/* write */
247	nv_ioctl,		/* ioctl */
248	nodev,			/* devmap */
249	nodev,			/* mmap */
250	nodev,			/* segmap */
251	nochpoll,		/* chpoll */
252	ddi_prop_op,		/* prop_op */
253	NULL,			/* streams */
254	D_NEW | D_MP |
255	D_64BIT | D_HOTPLUG,	/* flags */
256	CB_REV			/* rev */
257};
258#endif  /* SGPIO_SUPPORT */
259
260
261static struct dev_ops nv_dev_ops = {
262	DEVO_REV,		/* devo_rev */
263	0,			/* refcnt  */
264	nv_getinfo,		/* info */
265	nulldev,		/* identify */
266	nulldev,		/* probe */
267	nv_attach,		/* attach */
268	nv_detach,		/* detach */
269	nodev,			/* no reset */
270#ifdef SGPIO_SUPPORT
271	&nv_cb_ops,		/* driver operations */
272#else
273	(struct cb_ops *)0,	/* driver operations */
274#endif
275	NULL,			/* bus operations */
276	NULL,			/* power */
277	nv_quiesce		/* quiesce */
278};
279
280
281/*
282 * Request Sense CDB for ATAPI
283 */
284static const uint8_t nv_rqsense_cdb[16] = {
285	SCMD_REQUEST_SENSE,
286	0,
287	0,
288	0,
289	SATA_ATAPI_MIN_RQSENSE_LEN,
290	0,
291	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
292};
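
/*
 * This CDB is used by nv_start_rqsense_pio() (prototype above) to fetch
 * up to SATA_ATAPI_MIN_RQSENSE_LEN bytes of sense data when an ATAPI
 * command reports an error.
 */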
293
294
295static sata_tran_hotplug_ops_t nv_hotplug_ops;
296
297extern struct mod_ops mod_driverops;
298
299static  struct modldrv modldrv = {
300	&mod_driverops,	/* driverops */
301	"Nvidia ck804/mcp51/mcp55 HBA",
302	&nv_dev_ops,	/* driver ops */
303};
304
305static  struct modlinkage modlinkage = {
306	MODREV_1,
307	&modldrv,
308	NULL
309};
310
311
312/*
313 * wait between checks of reg status
314 */
315int nv_usec_delay = NV_WAIT_REG_CHECK;
316
317/*
318 * The following is needed for nv_vcmn_err()
319 */
320static kmutex_t nv_log_mutex; /* protects nv_log_buf */
321static char nv_log_buf[NV_STRING_512];
322int nv_debug_flags = NVDBG_ALWAYS;
323int nv_log_to_console = B_FALSE;
324
325int nv_log_delay = 0;
326int nv_prom_print = B_FALSE;
327
328/*
329 * for debugging
330 */
331#ifdef DEBUG
332int ncq_commands = 0;
333int non_ncq_commands = 0;
334#endif
335
336/*
337 * Opaque state pointer to be initialized by ddi_soft_state_init()
338 */
339static void *nv_statep	= NULL;
340
341/* This can be disabled if there are any problems with 40-bit DMA */
342int nv_sata_40bit_dma = B_TRUE;
343
344static sata_tran_hotplug_ops_t nv_hotplug_ops = {
345	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
346	nv_sata_activate,	/* activate port. cfgadm -c connect */
347	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
348};
349
350
351/*
352 *  nv module initialization
353 */
354int
355_init(void)
356{
357	int	error;
358
359	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
360
361	if (error != 0) {
362
363		return (error);
364	}
365
366	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
367
368	if ((error = sata_hba_init(&modlinkage)) != 0) {
369		ddi_soft_state_fini(&nv_statep);
370		mutex_destroy(&nv_log_mutex);
371
372		return (error);
373	}
374
375	error = mod_install(&modlinkage);
376	if (error != 0) {
377		sata_hba_fini(&modlinkage);
378		ddi_soft_state_fini(&nv_statep);
379		mutex_destroy(&nv_log_mutex);
380
381		return (error);
382	}
383
384	return (error);
385}
386
387
388/*
389 * nv module uninitialize
390 */
391int
392_fini(void)
393{
394	int	error;
395
396	error = mod_remove(&modlinkage);
397
398	if (error != 0) {
399		return (error);
400	}
401
402	/*
403	 * remove the resources allocated in _init()
404	 */
405	mutex_destroy(&nv_log_mutex);
406	sata_hba_fini(&modlinkage);
407	ddi_soft_state_fini(&nv_statep);
408
409	return (error);
410}
411
412
413/*
414 * nv _info entry point
415 */
416int
417_info(struct modinfo *modinfop)
418{
419	return (mod_info(&modlinkage, modinfop));
420}
421
422
423/*
424 * these wrappers for ddi_{get,put}{8,16,32} are for observability
425 * with dtrace (a sample probe follows the #endif below)
426 */
427#ifdef DEBUG
428
429static void
430nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
431{
432	ddi_put8(handle, dev_addr, value);
433}
434
435static void
436nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
437{
438	ddi_put32(handle, dev_addr, value);
439}
440
441static uint32_t
442nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
443{
444	return (ddi_get32(handle, dev_addr));
445}
446
447static void
448nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
449{
450	ddi_put16(handle, dev_addr, value);
451}
452
453static uint16_t
454nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
455{
456	return (ddi_get16(handle, dev_addr));
457}
458
459static uint8_t
460nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
461{
462	return (ddi_get8(handle, dev_addr));
463}
464
465#else
466
467#define	nv_put8 ddi_put8
468#define	nv_put32 ddi_put32
469#define	nv_get32 ddi_get32
470#define	nv_put16 ddi_put16
471#define	nv_get16 ddi_get16
472#define	nv_get8 ddi_get8
473
474#endif
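
/*
 * Example of the observability these wrappers provide (assumes a DEBUG
 * build, where the names are real functions visible to the fbt provider;
 * in non-DEBUG builds they map directly to the ddi_{get,put} routines):
 *
 *	dtrace -n 'fbt::nv_put8:entry { @writes[arg2] = count(); }'
 *
 * counts 8-bit register writes by the value written.
 */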
475
476
477/*
478 * Driver attach
479 */
480static int
481nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
482{
483	int status, attach_state, intr_types, bar, i, command;
484	int inst = ddi_get_instance(dip);
485	ddi_acc_handle_t pci_conf_handle;
486	nv_ctl_t *nvc;
487	uint8_t subclass;
488	uint32_t reg32;
489#ifdef SGPIO_SUPPORT
490	pci_regspec_t *regs;
491	int rlen;
492#endif
493
494	switch (cmd) {
495
496	case DDI_ATTACH:
497
498		NVLOG((NVDBG_INIT, NULL, NULL,
499		    "nv_attach(): DDI_ATTACH inst %d", inst));
500
501		attach_state = ATTACH_PROGRESS_NONE;
502
503		status = ddi_soft_state_zalloc(nv_statep, inst);
504
505		if (status != DDI_SUCCESS) {
506			break;
507		}
508
509		nvc = ddi_get_soft_state(nv_statep, inst);
510
511		nvc->nvc_dip = dip;
512
513		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
514
515		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
516			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
517			    PCI_CONF_REVID);
518			NVLOG((NVDBG_INIT, NULL, NULL,
519			    "inst %d: silicon revid is %x nv_debug_flags=%x",
520			    inst, nvc->nvc_revid, nv_debug_flags));
521		} else {
522			break;
523		}
524
525		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
526
527		/*
528		 * If a device is attached after a suspend/resume, sometimes
529		 * the command register is zero, as it might not be set by
530		 * BIOS or a parent.  Set it again here.
531		 */
532		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
533
534		if (command == 0) {
535			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
536			    " register", inst);
537			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
538			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
539		}
540
541		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
542
543		if (subclass & PCI_MASS_RAID) {
544			cmn_err(CE_WARN,
545			    "attach failed: RAID mode not supported");
546			break;
547		}
548
549		/*
550		 * the 6 bars of the controller are:
551		 * 0: port 0 task file
552		 * 1: port 0 status
553		 * 2: port 1 task file
554		 * 3: port 1 status
555		 * 4: bus master for both ports
556		 * 5: extended registers for SATA features
557		 */
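		/*
		 * Note: register set 0 of the "reg" property is the PCI
		 * configuration space entry, so BAR n is mapped with
		 * register number n + 1 below.
		 */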
558		for (bar = 0; bar < 6; bar++) {
559			status = ddi_regs_map_setup(dip, bar + 1,
560			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
561			    &nvc->nvc_bar_hdl[bar]);
562
563			if (status != DDI_SUCCESS) {
564				NVLOG((NVDBG_INIT, nvc, NULL,
565				    "ddi_regs_map_setup failure for bar"
566				    " %d status = %d", bar, status));
567				break;
568			}
569		}
570
571		attach_state |= ATTACH_PROGRESS_BARS;
572
573		/*
574		 * initialize controller and driver core
575		 */
576		status = nv_init_ctl(nvc, pci_conf_handle);
577
578		if (status == NV_FAILURE) {
579			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
580
581			break;
582		}
583
584		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
585
586		/*
587		 * initialize mutexes
588		 */
589		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
590		    DDI_INTR_PRI(nvc->nvc_intr_pri));
591
592		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
593
594		/*
595		 * get supported interrupt types
596		 */
597		if (ddi_intr_get_supported_types(dip, &intr_types) !=
598		    DDI_SUCCESS) {
599			nv_cmn_err(CE_WARN, nvc, NULL,
600			    "!ddi_intr_get_supported_types failed");
601			NVLOG((NVDBG_INIT, nvc, NULL,
602			    "interrupt supported types failed"));
603
604			break;
605		}
606
607		NVLOG((NVDBG_INIT, nvc, NULL,
608		    "ddi_intr_get_supported_types() returned: 0x%x",
609		    intr_types));
610
611#ifdef NV_MSI_SUPPORTED
612		if (intr_types & DDI_INTR_TYPE_MSI) {
613			NVLOG((NVDBG_INIT, nvc, NULL,
614			    "using MSI interrupt type"));
615
616			/*
617			 * Try MSI first, but fall back to legacy if MSI
618			 * attach fails
619			 */
620			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
621				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
622				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
623				NVLOG((NVDBG_INIT, nvc, NULL,
624				    "MSI interrupt setup done"));
625			} else {
626				nv_cmn_err(CE_CONT, nvc, NULL,
627				    "!MSI registration failed "
628				    "will try Legacy interrupts");
629			}
630		}
631#endif
632
633		/*
634		 * Either the MSI interrupt setup has failed or only
635		 * the fixed interrupts are available on the system.
636		 */
637		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
638		    (intr_types & DDI_INTR_TYPE_FIXED)) {
639
640			NVLOG((NVDBG_INIT, nvc, NULL,
641			    "using Legacy interrupt type"));
642
643			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
644				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
645				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
646				NVLOG((NVDBG_INIT, nvc, NULL,
647				    "Legacy interrupt setup done"));
648			} else {
649				nv_cmn_err(CE_WARN, nvc, NULL,
650				    "!legacy interrupt setup failed");
651				NVLOG((NVDBG_INIT, nvc, NULL,
652				    "legacy interrupt setup failed"));
653				break;
654			}
655		}
656
657		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
658			NVLOG((NVDBG_INIT, nvc, NULL,
659			    "no interrupts registered"));
660			break;
661		}
662
663#ifdef SGPIO_SUPPORT
664		/*
665		 * save off the controller number
666		 */
667		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
668		    "reg", (caddr_t)&regs, &rlen);
669		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
670		kmem_free(regs, rlen);
671
672		/*
673		 * initialize SGPIO
674		 */
675		nv_sgp_led_init(nvc, pci_conf_handle);
676#endif	/* SGPIO_SUPPORT */
677
678		/*
679		 * attach to sata module
680		 */
681		if (sata_hba_attach(nvc->nvc_dip,
682		    &nvc->nvc_sata_hba_tran,
683		    DDI_ATTACH) != DDI_SUCCESS) {
684			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
685
686			break;
687		}
688
689		pci_config_teardown(&pci_conf_handle);
690
691		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
692
693		return (DDI_SUCCESS);
694
695	case DDI_RESUME:
696
697		nvc = ddi_get_soft_state(nv_statep, inst);
698
699		NVLOG((NVDBG_INIT, nvc, NULL,
700		    "nv_attach(): DDI_RESUME inst %d", inst));
701
702		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
703			return (DDI_FAILURE);
704		}
705
706		/*
707		 * If a device is attached after a suspend/resume, sometimes
708		 * the command register is zero, as it might not be set by
709		 * BIOS or a parent.  Set it again here.
710		 */
711		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
712
713		if (command == 0) {
714			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
715			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
716		}
717
718		/*
719		 * Need to set bit 2 to 1 at config offset 0x50
720		 * to enable access to the bar5 registers.
721		 */
722		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
723
724		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
725			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
726			    reg32 | NV_BAR5_SPACE_EN);
727		}
728
729		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
730
731		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
732			nv_resume(&(nvc->nvc_port[i]));
733		}
734
735		pci_config_teardown(&pci_conf_handle);
736
737		return (DDI_SUCCESS);
738
739	default:
740		return (DDI_FAILURE);
741	}
742
743
744	/*
745	 * DDI_ATTACH failure path starts here
746	 */
747
748	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
749		nv_rem_intrs(nvc);
750	}
751
752	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
753		/*
754		 * Remove timers
755		 */
756		int port = 0;
757		nv_port_t *nvp;
758
759		for (; port < NV_MAX_PORTS(nvc); port++) {
760			nvp = &(nvc->nvc_port[port]);
761			if (nvp->nvp_timeout_id != 0) {
762				(void) untimeout(nvp->nvp_timeout_id);
763			}
764		}
765	}
766
767	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
768		mutex_destroy(&nvc->nvc_mutex);
769	}
770
771	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
772		nv_uninit_ctl(nvc);
773	}
774
775	if (attach_state & ATTACH_PROGRESS_BARS) {
776		while (--bar >= 0) {
777			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
778		}
779	}
780
781	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
782		ddi_soft_state_free(nv_statep, inst);
783	}
784
785	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
786		pci_config_teardown(&pci_conf_handle);
787	}
788
789	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
790
791	return (DDI_FAILURE);
792}
793
794
795static int
796nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
797{
798	int i, port, inst = ddi_get_instance(dip);
799	nv_ctl_t *nvc;
800	nv_port_t *nvp;
801
802	nvc = ddi_get_soft_state(nv_statep, inst);
803
804	switch (cmd) {
805
806	case DDI_DETACH:
807
808		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
809
810		/*
811		 * Remove interrupts
812		 */
813		nv_rem_intrs(nvc);
814
815		/*
816		 * Remove timers
817		 */
818		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
819			nvp = &(nvc->nvc_port[port]);
820			if (nvp->nvp_timeout_id != 0) {
821				(void) untimeout(nvp->nvp_timeout_id);
822			}
823		}
824
825		/*
826		 * Remove maps
827		 */
828		for (i = 0; i < 6; i++) {
829			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
830		}
831
832		/*
833		 * Destroy mutexes
834		 */
835		mutex_destroy(&nvc->nvc_mutex);
836
837		/*
838		 * Uninitialize the controller
839		 */
840		nv_uninit_ctl(nvc);
841
842#ifdef SGPIO_SUPPORT
843		/*
844		 * release SGPIO resources
845		 */
846		nv_sgp_cleanup(nvc);
847#endif
848
849		/*
850		 * unregister from the sata module
851		 */
852		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
853
854		/*
855		 * Free soft state
856		 */
857		ddi_soft_state_free(nv_statep, inst);
858
859		return (DDI_SUCCESS);
860
861	case DDI_SUSPEND:
862		/*
863		 * The PM functions for suspend and resume are incomplete
864		 * and need additional work.  They may or may not work in
865		 * the current state.
866		 */
867		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
868
869		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
870			nv_suspend(&(nvc->nvc_port[i]));
871		}
872
873		nvc->nvc_state |= NV_CTRL_SUSPEND;
874
875		return (DDI_SUCCESS);
876
877	default:
878		return (DDI_FAILURE);
879	}
880}
881
882
883/*ARGSUSED*/
884static int
885nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
886{
887	nv_ctl_t *nvc;
888	int instance;
889	dev_t dev;
890
891	dev = (dev_t)arg;
892	instance = getminor(dev);
893
894	switch (infocmd) {
895	case DDI_INFO_DEVT2DEVINFO:
896		nvc = ddi_get_soft_state(nv_statep,  instance);
897		if (nvc != NULL) {
898			*result = nvc->nvc_dip;
899			return (DDI_SUCCESS);
900		} else {
901			*result = NULL;
902			return (DDI_FAILURE);
903		}
904	case DDI_INFO_DEVT2INSTANCE:
905		*(int *)result = instance;
906		break;
907	default:
908		break;
909	}
910	return (DDI_SUCCESS);
911}
912
913
914#ifdef SGPIO_SUPPORT
915/* ARGSUSED */
916static int
917nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
918{
919	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
920
921	if (nvc == NULL) {
922		return (ENXIO);
923	}
924
925	return (0);
926}
927
928
929/* ARGSUSED */
930static int
931nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
932{
933	return (0);
934}
935
936
937/* ARGSUSED */
938static int
939nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
940{
941	nv_ctl_t *nvc;
942	int inst;
943	int status;
944	int ctlr, port;
945	int drive;
946	uint8_t curr_led;
947	struct dc_led_ctl led;
948
949	inst = getminor(dev);
950	if (inst == -1) {
951		return (EBADF);
952	}
953
954	nvc = ddi_get_soft_state(nv_statep, inst);
955	if (nvc == NULL) {
956		return (EBADF);
957	}
958
959	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
960		return (EIO);
961	}
962
963	switch (cmd) {
964	case DEVCTL_SET_LED:
965		status = ddi_copyin((void *)arg, &led,
966		    sizeof (struct dc_led_ctl), mode);
967		if (status != 0)
968			return (EFAULT);
969
970		/*
971		 * Since only the first two controllers currently support
972		 * SGPIO (as per NVIDIA docs), this code does as well.
973		 * Note that this validates the port value within led_state
974		 * as well.
975		 */
976
977		ctlr = SGP_DRV_TO_CTLR(led.led_number);
978		if ((ctlr != 0) && (ctlr != 1))
979			return (ENXIO);
980
981		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
982		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
983			return (EINVAL);
984		}
985
986		drive = led.led_number;
987
988		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
989		    (led.led_state == DCL_STATE_OFF)) {
990
991			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
992				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
993			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
994				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
995			} else {
996				return (ENXIO);
997			}
998
999			port = SGP_DRV_TO_PORT(led.led_number);
1000			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1001		}
1002
1003		if (led.led_ctl_active == DCL_CNTRL_ON) {
1004			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1005				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1006			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1007				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1008			} else {
1009				return (ENXIO);
1010			}
1011
1012			port = SGP_DRV_TO_PORT(led.led_number);
1013			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1014		}
1015
1016		break;
1017
1018	case DEVCTL_GET_LED:
1019		status = ddi_copyin((void *)arg, &led,
1020		    sizeof (struct dc_led_ctl), mode);
1021		if (status != 0)
1022			return (EFAULT);
1023
1024		/*
1025		 * Since only the first two controllers currently support
1026		 * SGPIO (as per NVIDIA docs), this code does as well.
1027		 * Note that this validates the port value within led_state
1028		 * as well.
1029		 */
1030
1031		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1032		if ((ctlr != 0) && (ctlr != 1))
1033			return (ENXIO);
1034
1035		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1036		    led.led_number);
1037
1038		port = SGP_DRV_TO_PORT(led.led_number);
1039		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1040			led.led_ctl_active = DCL_CNTRL_ON;
1041
1042			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1043				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1044					led.led_state = DCL_STATE_OFF;
1045				else
1046					led.led_state = DCL_STATE_ON;
1047			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1048				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1049					led.led_state = DCL_STATE_OFF;
1050				else
1051					led.led_state = DCL_STATE_ON;
1052			} else {
1053				return (ENXIO);
1054			}
1055		} else {
1056			led.led_ctl_active = DCL_CNTRL_OFF;
1057			/*
1058			 * Not really off, but never set and no constant for
1059			 * tri-state
1060			 */
1061			led.led_state = DCL_STATE_OFF;
1062		}
1063
1064		status = ddi_copyout(&led, (void *)arg,
1065		    sizeof (struct dc_led_ctl), mode);
1066		if (status != 0)
1067			return (EFAULT);
1068
1069		break;
1070
1071	case DEVCTL_NUM_LEDS:
1072		led.led_number = SGPIO_DRV_CNT_VALUE;
1073		led.led_ctl_active = 1;
1074		led.led_type = 3;
1075
1076		/*
1077		 * According to documentation, NVIDIA SGPIO is supposed to
1078		 * support blinking, but it does not seem to work in practice.
1079		 */
1080		led.led_state = DCL_STATE_ON;
1081
1082		status = ddi_copyout(&led, (void *)arg,
1083		    sizeof (struct dc_led_ctl), mode);
1084		if (status != 0)
1085			return (EFAULT);
1086
1087		break;
1088
1089	default:
1090		return (EINVAL);
1091	}
1092
1093	return (0);
1094}
1095#endif	/* SGPIO_SUPPORT */
1096
1097
1098/*
1099 * Called by sata module to probe a port.  Port and device state
1100 * are not changed here... only reported back to the sata module.
1101 *
1102 * If probe confirms a device is present for the first time, it will
1103 * initiate a device reset, then probe will be called again and the
1104 * signature will be checked.  If the signature is valid, data structures
1105 * will be initialized.
1106 */
1107static int
1108nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1109{
1110	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1111	uint8_t cport = sd->satadev_addr.cport;
1112	uint8_t pmport = sd->satadev_addr.pmport;
1113	uint8_t qual = sd->satadev_addr.qual;
1114	clock_t nv_lbolt = ddi_get_lbolt();
1115	nv_port_t *nvp;
1116
1117	if (cport >= NV_MAX_PORTS(nvc)) {
1118		sd->satadev_type = SATA_DTYPE_NONE;
1119		sd->satadev_state = SATA_STATE_UNKNOWN;
1120
1121		return (SATA_FAILURE);
1122	}
1123
1124	ASSERT(nvc->nvc_port != NULL);
1125	nvp = &(nvc->nvc_port[cport]);
1126	ASSERT(nvp != NULL);
1127
1128	NVLOG((NVDBG_PROBE, nvc, nvp,
1129	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1130	    "qual: 0x%x", cport, pmport, qual));
1131
1132	mutex_enter(&nvp->nvp_mutex);
1133
1134	/*
1135	 * This check seems to be done in the SATA module.
1136	 * It may not be required here
1137	 */
1138	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1139		nv_cmn_err(CE_WARN, nvc, nvp,
1140		    "port inactive.  Use cfgadm to activate");
1141		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1142		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1143		mutex_exit(&nvp->nvp_mutex);
1144
1145		return (SATA_FAILURE);
1146	}
1147
1148	if (qual == SATA_ADDR_PMPORT) {
1149		sd->satadev_type = SATA_DTYPE_NONE;
1150		sd->satadev_state = SATA_STATE_UNKNOWN;
1151		mutex_exit(&nvp->nvp_mutex);
1152		nv_cmn_err(CE_WARN, nvc, nvp,
1153		    "controller does not support port multiplier");
1154
1155		return (SATA_FAILURE);
1156	}
1157
1158	sd->satadev_state = SATA_PSTATE_PWRON;
1159
1160	nv_copy_registers(nvp, sd, NULL);
1161
1162	/*
1163	 * determine link status
1164	 */
1165	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1166		uint8_t det;
1167
1168		/*
1169		 * Reset will cause the link to go down for a short period of
1170		 * time.  If the link is lost for less than 2 seconds, ignore it
1171		 * so that the reset can progress.
1172		 */
1173		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1174
1175			if (nvp->nvp_link_lost_time == 0) {
1176				nvp->nvp_link_lost_time = nv_lbolt;
1177			}
1178
1179			if (TICK_TO_SEC(nv_lbolt -
1180			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1181				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1182				    "probe: intermittent link lost while"
1183				    " resetting"));
1184				/*
1185				 * fake status of link so that probe continues
1186				 */
1187				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1188				    SSTATUS_IPM_ACTIVE);
1189				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1190				    SSTATUS_DET_DEVPRE_PHYCOM);
1191				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1192				mutex_exit(&nvp->nvp_mutex);
1193
1194				return (SATA_SUCCESS);
1195			} else {
1196				nvp->nvp_state &=
1197				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1198			}
1199		}
1200
1201		/*
1202		 * no link, so tear down port and abort all active packets
1203		 */
1204
1205		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1206		    SSTATUS_DET_SHIFT;
1207
1208		switch (det) {
1209		case SSTATUS_DET_NODEV:
1210		case SSTATUS_DET_PHYOFFLINE:
1211			sd->satadev_type = SATA_DTYPE_NONE;
1212			break;
1213		default:
1214			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1215			break;
1216		}
1217
1218		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1219		    "probe: link lost invoking nv_abort_active"));
1220
1221		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1222		nv_uninit_port(nvp);
1223
1224		mutex_exit(&nvp->nvp_mutex);
1225
1226		return (SATA_SUCCESS);
1227	} else {
1228		nvp->nvp_link_lost_time = 0;
1229	}
1230
1231	/*
1232	 * A device is present so clear hotremoved flag
1233	 */
1234	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1235
1236#ifdef SGPIO_SUPPORT
1237	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1238	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1239#endif
1240
1241	/*
1242	 * If the signature was acquired previously there is no need to
1243	 * do it again.
1244	 */
1245	if (nvp->nvp_signature != 0) {
1246		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1247		    "probe: signature acquired previously"));
1248		sd->satadev_type = nvp->nvp_type;
1249		mutex_exit(&nvp->nvp_mutex);
1250
1251		return (SATA_SUCCESS);
1252	}
1253
1254	/*
1255	 * If NV_PORT_RESET is not set, this is the first time through
1256	 * so perform reset and return.
1257	 */
1258	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1259		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1260		    "probe: first reset to get sig"));
1261		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1262		nv_reset(nvp);
1263		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1264		nvp->nvp_probe_time = nv_lbolt;
1265		mutex_exit(&nvp->nvp_mutex);
1266
1267		return (SATA_SUCCESS);
1268	}
1269
1270	/*
1271	 * Reset was done previously.  see if the signature is
1272	 * available.
1273	 */
1274	nv_read_signature(nvp);
1275	sd->satadev_type = nvp->nvp_type;
1276
1277	/*
1278	 * Some drives may require additional resets to get a
1279	 * valid signature.  If a drive was not just powered up, the signature
1280	 * should arrive within half a second of reset.  Therefore if more
1281	 * than 5 seconds has elapsed while waiting for a signature, reset
1282	 * again.  These extra resets do not appear to create problems when
1283	 * the drive is spinning up for more than this reset period.
1284	 */
1285	if (nvp->nvp_signature == 0) {
1286		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1287			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1288			    " during signature acquisition"));
1289			nv_reset(nvp);
1290		}
1291
1292		mutex_exit(&nvp->nvp_mutex);
1293
1294		return (SATA_SUCCESS);
1295	}
1296
1297	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1298	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1299
1300	/*
1301	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1302	 * it is not either of those, then just return.
1303	 */
1304	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1305	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1306		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1307		    " disks/CDs/DVDs.  Signature acquired was %X",
1308		    nvp->nvp_signature));
1309		mutex_exit(&nvp->nvp_mutex);
1310
1311		return (SATA_SUCCESS);
1312	}
1313
1314	/*
1315	 * make sure structures are initialized
1316	 */
1317	if (nv_init_port(nvp) == NV_SUCCESS) {
1318		NVLOG((NVDBG_PROBE, nvc, nvp,
1319		    "device detected and set up at port %d", cport));
1320		mutex_exit(&nvp->nvp_mutex);
1321
1322		return (SATA_SUCCESS);
1323	} else {
1324		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1325		    "structures for port %d", cport);
1326		mutex_exit(&nvp->nvp_mutex);
1327
1328		return (SATA_FAILURE);
1329	}
1330	/*NOTREACHED*/
1331}
1332
1333
1334/*
1335 * Called by sata module to start a new command.
1336 */
1337static int
1338nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1339{
1340	int cport = spkt->satapkt_device.satadev_addr.cport;
1341	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1342	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1343	int ret;
1344
1345	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1346	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1347
1348	mutex_enter(&nvp->nvp_mutex);
1349
1350	/*
1351	 * hotremoved is an intermediate state where the link was lost,
1352	 * but the hotplug event has not yet been processed by the sata
1353	 * module.  Fail the request.
1354	 */
1355	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1356		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1357		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1358		NVLOG((NVDBG_ERRS, nvc, nvp,
1359		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1360		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1361		mutex_exit(&nvp->nvp_mutex);
1362
1363		return (SATA_TRAN_PORT_ERROR);
1364	}
1365
1366	if (nvp->nvp_state & NV_PORT_RESET) {
1367		NVLOG((NVDBG_ERRS, nvc, nvp,
1368		    "still waiting for reset completion"));
1369		spkt->satapkt_reason = SATA_PKT_BUSY;
1370		mutex_exit(&nvp->nvp_mutex);
1371
1372		/*
1373		 * If in panic, timeouts do not occur, so fake one
1374		 * so that the signature can be acquired to complete
1375		 * the reset handling.
1376		 */
1377		if (ddi_in_panic()) {
1378			nv_timeout(nvp);
1379		}
1380
1381		return (SATA_TRAN_BUSY);
1382	}
1383
1384	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1385		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1386		NVLOG((NVDBG_ERRS, nvc, nvp,
1387		    "nv_sata_start: SATA_DTYPE_NONE"));
1388		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1389		mutex_exit(&nvp->nvp_mutex);
1390
1391		return (SATA_TRAN_PORT_ERROR);
1392	}
1393
1394	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1395		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1396		nv_cmn_err(CE_WARN, nvc, nvp,
1397		    "port multipliers not supported by controller");
1398		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1399		mutex_exit(&nvp->nvp_mutex);
1400
1401		return (SATA_TRAN_CMD_UNSUPPORTED);
1402	}
1403
1404	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1405		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1406		NVLOG((NVDBG_ERRS, nvc, nvp,
1407		    "nv_sata_start: port not yet initialized"));
1408		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1409		mutex_exit(&nvp->nvp_mutex);
1410
1411		return (SATA_TRAN_PORT_ERROR);
1412	}
1413
1414	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1415		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1416		NVLOG((NVDBG_ERRS, nvc, nvp,
1417		    "nv_sata_start: NV_PORT_INACTIVE"));
1418		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1419		mutex_exit(&nvp->nvp_mutex);
1420
1421		return (SATA_TRAN_PORT_ERROR);
1422	}
1423
1424	if (nvp->nvp_state & NV_PORT_FAILED) {
1425		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1426		NVLOG((NVDBG_ERRS, nvc, nvp,
1427		    "nv_sata_start: NV_PORT_FAILED state"));
1428		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1429		mutex_exit(&nvp->nvp_mutex);
1430
1431		return (SATA_TRAN_PORT_ERROR);
1432	}
1433
1434	/*
1435	 * After a device reset, once sata module restore processing
1436	 * is complete, the sata module will set sata_clear_dev_reset, which
1437	 * indicates that restore processing has completed and normal
1438	 * non-restore related commands should be processed.
1439	 */
1440	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1441		nvp->nvp_state &= ~NV_PORT_RESTORE;
1442		NVLOG((NVDBG_ENTRY, nvc, nvp,
1443		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1444	}
1445
1446	/*
1447	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1448	 * only allow commands which restore device state.  The sata module
1449	 * marks such commands with sata_ignore_dev_reset.
1450	 *
1451	 * during coredump, nv_reset is called but then the restore
1452	 * doesn't happen.  For now, work around this by ignoring the wait for
1453	 * restore if the system is panicking.
1454	 */
1455	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1456	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1457	    (ddi_in_panic() == 0)) {
1458		spkt->satapkt_reason = SATA_PKT_BUSY;
1459		NVLOG((NVDBG_ENTRY, nvc, nvp,
1460		    "nv_sata_start: waiting for restore "));
1461		mutex_exit(&nvp->nvp_mutex);
1462
1463		return (SATA_TRAN_BUSY);
1464	}
1465
1466	if (nvp->nvp_state & NV_PORT_ABORTING) {
1467		spkt->satapkt_reason = SATA_PKT_BUSY;
1468		NVLOG((NVDBG_ERRS, nvc, nvp,
1469		    "nv_sata_start: NV_PORT_ABORTING"));
1470		mutex_exit(&nvp->nvp_mutex);
1471
1472		return (SATA_TRAN_BUSY);
1473	}
1474
1475	if (spkt->satapkt_op_mode &
1476	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1477
1478		ret = nv_start_sync(nvp, spkt);
1479
1480		mutex_exit(&nvp->nvp_mutex);
1481
1482		return (ret);
1483	}
1484
1485	/*
1486	 * start the command asynchronously
1487	 */
1488	ret = nv_start_async(nvp, spkt);
1489
1490	mutex_exit(&nvp->nvp_mutex);
1491
1492	return (ret);
1493}
1494
1495
1496/*
1497 * SATA_OPMODE_POLLING implies the driver is in a
1498 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1499 * If only SATA_OPMODE_SYNCH is set, the driver can use
1500 * interrupts and sleep wait on a cv.
1501 *
1502 * If SATA_OPMODE_POLLING is set, the driver can't use
1503 * interrupts and must busy wait and simulate the
1504 * interrupts by waiting for BSY to be cleared.
1505 *
1506 * Synchronous mode has to return BUSY if there are
1507 * any other commands already on the drive.
1508 */
1509static int
1510nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1511{
1512	nv_ctl_t *nvc = nvp->nvp_ctlp;
1513	int ret;
1514
1515	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1516
1517	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1518		spkt->satapkt_reason = SATA_PKT_BUSY;
1519		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1520		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1521		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1522		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1523		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1524
1525		return (SATA_TRAN_BUSY);
1526	}
1527
1528	/*
1529	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1530	 */
1531	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1532	    servicing_interrupt()) {
1533		spkt->satapkt_reason = SATA_PKT_BUSY;
1534		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1535		    "SYNC mode not allowed during interrupt"));
1536
1537		return (SATA_TRAN_BUSY);
1538
1539	}
1540
1541	/*
1542	 * disable interrupt generation if in polled mode
1543	 */
1544	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1545		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1546	}
1547
1548	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1549		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1550			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1551		}
1552
1553		return (ret);
1554	}
1555
1556	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1557		mutex_exit(&nvp->nvp_mutex);
1558		ret = nv_poll_wait(nvp, spkt);
1559		mutex_enter(&nvp->nvp_mutex);
1560
1561		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1562
1563		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1564		    " done ret %d", ret));
1565
1566		return (ret);
1567	}
1568
1569	/*
1570	 * non-polling synchronous mode handling.  The interrupt will signal
1571	 * when the IO is completed.
1572	 */
1573	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1574
1575	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1576
1577		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1578	}
1579
1580	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1581	    " done reason %d", spkt->satapkt_reason));
1582
1583	return (SATA_TRAN_ACCEPTED);
1584}
1585
1586
1587static int
1588nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1589{
1590	int ret;
1591	nv_ctl_t *nvc = nvp->nvp_ctlp;
1592#if ! defined(__lock_lint)
1593	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1594#endif
1595
1596	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1597
1598	for (;;) {
1599
1600		NV_DELAY_NSEC(400);
1601
1602		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1603		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1604		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1605			mutex_enter(&nvp->nvp_mutex);
1606			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1607			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1608			nv_reset(nvp);
1609			nv_complete_io(nvp, spkt, 0);
1610			mutex_exit(&nvp->nvp_mutex);
1611			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1612			    "SATA_STATUS_BSY"));
1613
1614			return (SATA_TRAN_ACCEPTED);
1615		}
1616
1617		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1618
1619		/*
1620		 * Simulate interrupt.
1621		 */
1622		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1623		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1624
1625		if (ret != DDI_INTR_CLAIMED) {
1626			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1627			    " unclaimed -- resetting"));
1628			mutex_enter(&nvp->nvp_mutex);
1629			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1630			nv_reset(nvp);
1631			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1632			nv_complete_io(nvp, spkt, 0);
1633			mutex_exit(&nvp->nvp_mutex);
1634
1635			return (SATA_TRAN_ACCEPTED);
1636		}
1637
1638#if ! defined(__lock_lint)
1639		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1640			/*
1641			 * packet is complete
1642			 */
1643			return (SATA_TRAN_ACCEPTED);
1644		}
1645#endif
1646	}
1647	/*NOTREACHED*/
1648}
1649
1650
1651/*
1652 * Called by sata module to abort outstanding packets.
1653 */
1654/*ARGSUSED*/
1655static int
1656nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1657{
1658	int cport = spkt->satapkt_device.satadev_addr.cport;
1659	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1660	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1661	int c_a, ret;
1662
1663	ASSERT(cport < NV_MAX_PORTS(nvc));
1664	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1665
1666	mutex_enter(&nvp->nvp_mutex);
1667
1668	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1669		mutex_exit(&nvp->nvp_mutex);
1670		nv_cmn_err(CE_WARN, nvc, nvp,
1671		    "abort request failed: port inactive");
1672
1673		return (SATA_FAILURE);
1674	}
1675
1676	/*
1677	 * spkt == NULL then abort all commands
1678	 */
1679	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1680
1681	if (c_a) {
1682		NVLOG((NVDBG_ENTRY, nvc, nvp,
1683		    "packets aborted running=%d", c_a));
1684		ret = SATA_SUCCESS;
1685	} else {
1686		if (spkt == NULL) {
1687			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1688		} else {
1689			NVLOG((NVDBG_ENTRY, nvc, nvp,
1690			    "can't find spkt to abort"));
1691		}
1692		ret = SATA_FAILURE;
1693	}
1694
1695	mutex_exit(&nvp->nvp_mutex);
1696
1697	return (ret);
1698}
1699
1700
1701/*
1702 * if spkt == NULL abort all pkts running, otherwise
1703 * abort the requested packet.  must be called with nv_mutex
1704 * held and returns with it held.  Not NCQ aware.
1705 */
1706static int
1707nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1708{
1709	int aborted = 0, i, reset_once = B_FALSE;
1710	struct nv_slot *nv_slotp;
1711	sata_pkt_t *spkt_slot;
1712
1713	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1714
1715	/*
1716	 * return if the port is not configured
1717	 */
1718	if (nvp->nvp_slot == NULL) {
1719		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1720		    "nv_abort_active: not configured so returning"));
1721
1722		return (0);
1723	}
1724
1725	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1726
1727	nvp->nvp_state |= NV_PORT_ABORTING;
1728
1729	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1730
1731		nv_slotp = &(nvp->nvp_slot[i]);
1732		spkt_slot = nv_slotp->nvslot_spkt;
1733
1734		/*
1735		 * skip if not active command in slot
1736		 */
1737		if (spkt_slot == NULL) {
1738			continue;
1739		}
1740
1741		/*
1742		 * if a specific packet was requested, skip if
1743		 * this is not a match
1744		 */
1745		if ((spkt != NULL) && (spkt != spkt_slot)) {
1746			continue;
1747		}
1748
1749		/*
1750		 * stop the hardware.  This could need reworking
1751		 * when NCQ is enabled in the driver.
1752		 */
1753		if (reset_once == B_FALSE) {
1754			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1755
1756			/*
1757			 * stop DMA engine
1758			 */
1759			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1760
1761			nv_reset(nvp);
1762			reset_once = B_TRUE;
1763		}
1764
1765		spkt_slot->satapkt_reason = abort_reason;
1766		nv_complete_io(nvp, spkt_slot, i);
1767		aborted++;
1768	}
1769
1770	nvp->nvp_state &= ~NV_PORT_ABORTING;
1771
1772	return (aborted);
1773}
1774
1775
1776/*
1777 * Called by sata module to reset a port, device, or the controller.
1778 */
1779static int
1780nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1781{
1782	int cport = sd->satadev_addr.cport;
1783	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1784	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1785	int ret = SATA_SUCCESS;
1786
1787	ASSERT(cport < NV_MAX_PORTS(nvc));
1788
1789	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1790
1791	mutex_enter(&nvp->nvp_mutex);
1792
1793	switch (sd->satadev_addr.qual) {
1794
1795	case SATA_ADDR_CPORT:
1796		/*FALLTHROUGH*/
1797	case SATA_ADDR_DCPORT:
1798		nv_reset(nvp);
1799		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1800
1801		break;
1802	case SATA_ADDR_CNTRL:
1803		NVLOG((NVDBG_ENTRY, nvc, nvp,
1804		    "nv_sata_reset: controller reset not supported"));
1805
1806		break;
1807	case SATA_ADDR_PMPORT:
1808	case SATA_ADDR_DPMPORT:
1809		NVLOG((NVDBG_ENTRY, nvc, nvp,
1810		    "nv_sata_reset: port multipliers not supported"));
1811		/*FALLTHROUGH*/
1812	default:
1813		/*
1814		 * unsupported case
1815		 */
1816		ret = SATA_FAILURE;
1817		break;
1818	}
1819
1820	if (ret == SATA_SUCCESS) {
1821		/*
1822		 * If the port is inactive, do a quiet reset and don't attempt
1823		 * to wait for reset completion or do any post reset processing
1824		 */
1825		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1826			nvp->nvp_state &= ~NV_PORT_RESET;
1827			nvp->nvp_reset_time = 0;
1828		}
1829
1830		/*
1831		 * clear the port failed flag
1832		 */
1833		nvp->nvp_state &= ~NV_PORT_FAILED;
1834	}
1835
1836	mutex_exit(&nvp->nvp_mutex);
1837
1838	return (ret);
1839}
1840
1841
1842/*
1843 * Sata entry point to handle port activation.  cfgadm -c connect
1844 */
1845static int
1846nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1847{
1848	int cport = sd->satadev_addr.cport;
1849	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1850	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1851
1852	ASSERT(cport < NV_MAX_PORTS(nvc));
1853	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1854
1855	mutex_enter(&nvp->nvp_mutex);
1856
1857	sd->satadev_state = SATA_STATE_READY;
1858
1859	nv_copy_registers(nvp, sd, NULL);
1860
1861	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1862
1863	nvp->nvp_state = 0;
1864
1865	mutex_exit(&nvp->nvp_mutex);
1866
1867	return (SATA_SUCCESS);
1868}
1869
1870
1871/*
1872 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1873 */
1874static int
1875nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1876{
1877	int cport = sd->satadev_addr.cport;
1878	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1879	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1880
1881	ASSERT(cport < NV_MAX_PORTS(nvc));
1882	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1883
1884	mutex_enter(&nvp->nvp_mutex);
1885
1886	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1887
1888	/*
1889	 * mark the device as inaccessible
1890	 */
1891	nvp->nvp_state |= NV_PORT_INACTIVE;
1892
1893	/*
1894	 * disable the interrupts on port
1895	 */
1896	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1897
1898	nv_uninit_port(nvp);
1899
1900	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1901	nv_copy_registers(nvp, sd, NULL);
1902
1903	mutex_exit(&nvp->nvp_mutex);
1904
1905	return (SATA_SUCCESS);
1906}
1907
1908
1909/*
1910 * find an empty slot in the driver's queue, increment counters,
1911 * and then invoke the appropriate PIO or DMA start routine.
1912 */
1913static int
1914nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1915{
1916	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1917	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1918	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1919	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1920	nv_ctl_t *nvc = nvp->nvp_ctlp;
1921	nv_slot_t *nv_slotp;
1922	boolean_t dma_cmd;
1923
1924	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1925	    sata_cmdp->satacmd_cmd_reg));
1926
1927	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1928	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1929		nvp->nvp_ncq_run++;
1930		/*
1931		 * search for an empty NCQ slot.  By this time, it has already
1932		 * been determined by the caller that there is room on the
1933		 * queue.
1934		 */
1935		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1936		    on_bit <<= 1) {
1937			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1938				break;
1939			}
1940		}
1941
1942		/*
1943		 * The first empty slot found should not exceed the queue
1944		 * depth of the drive.  If it does, it's an error.
1945		 */
1946		ASSERT(slot != nvp->nvp_queue_depth);
1947
1948		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1949		    nvp->nvp_sactive);
1950		ASSERT((sactive & on_bit) == 0);
1951		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1952		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1953		    on_bit));
1954		nvp->nvp_sactive_cache |= on_bit;
1955
1956		ncq = NVSLOT_NCQ;
1957
1958	} else {
1959		nvp->nvp_non_ncq_run++;
1960		slot = 0;
1961	}
1962
1963	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1964
1965	ASSERT(nv_slotp->nvslot_spkt == NULL);
1966
1967	nv_slotp->nvslot_spkt = spkt;
1968	nv_slotp->nvslot_flags = ncq;
1969
1970	/*
1971	 * the sata module doesn't indicate which commands utilize the
1972	 * DMA engine, so find out using this switch table.
1973	 */
1974	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1975	case SATAC_READ_DMA_EXT:
1976	case SATAC_WRITE_DMA_EXT:
1977	case SATAC_WRITE_DMA:
1978	case SATAC_READ_DMA:
1979	case SATAC_READ_DMA_QUEUED:
1980	case SATAC_READ_DMA_QUEUED_EXT:
1981	case SATAC_WRITE_DMA_QUEUED:
1982	case SATAC_WRITE_DMA_QUEUED_EXT:
1983	case SATAC_READ_FPDMA_QUEUED:
1984	case SATAC_WRITE_FPDMA_QUEUED:
1985		dma_cmd = B_TRUE;
1986		break;
1987	default:
1988		dma_cmd = B_FALSE;
1989	}
1990
1991	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1992		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1993		nv_slotp->nvslot_start = nv_start_dma;
1994		nv_slotp->nvslot_intr = nv_intr_dma;
1995	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1996		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1997		nv_slotp->nvslot_start = nv_start_pkt_pio;
1998		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1999		if ((direction == SATA_DIR_READ) ||
2000		    (direction == SATA_DIR_WRITE)) {
2001			nv_slotp->nvslot_byte_count =
2002			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2003			nv_slotp->nvslot_v_addr =
2004			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2005			/*
2006			 * Freeing DMA resources allocated by the framework
2007			 * now to avoid buffer overwrite (dma sync) problems
2008			 * when the buffer is released at command completion.
2009			 * Primarily an issue on systems with more than
2010			 * 4GB of memory.
2011			 */
2012			sata_free_dma_resources(spkt);
2013		}
2014	} else if (direction == SATA_DIR_NODATA_XFER) {
2015		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2016		nv_slotp->nvslot_start = nv_start_nodata;
2017		nv_slotp->nvslot_intr = nv_intr_nodata;
2018	} else if (direction == SATA_DIR_READ) {
2019		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2020		nv_slotp->nvslot_start = nv_start_pio_in;
2021		nv_slotp->nvslot_intr = nv_intr_pio_in;
2022		nv_slotp->nvslot_byte_count =
2023		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2024		nv_slotp->nvslot_v_addr =
2025		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2026		/*
2027		 * Freeing DMA resources allocated by the framework now to
2028		 * avoid buffer overwrite (dma sync) problems when the buffer
2029		 * is released at command completion.  This is not an issue
2030		 * for write because write does not update the buffer.
2031		 * Primarily an issue on systems with more than 4GB of memory.
2032		 */
2033		sata_free_dma_resources(spkt);
2034	} else if (direction == SATA_DIR_WRITE) {
2035		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2036		nv_slotp->nvslot_start = nv_start_pio_out;
2037		nv_slotp->nvslot_intr = nv_intr_pio_out;
2038		nv_slotp->nvslot_byte_count =
2039		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2040		nv_slotp->nvslot_v_addr =
2041		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2042	} else {
2043		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2044		    " %d cookies %d cmd %x",
2045		    sata_cmdp->satacmd_flags.sata_data_direction,
2046		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2047		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2048		ret = SATA_TRAN_CMD_UNSUPPORTED;
2049
2050		goto fail;
2051	}
2052
2053	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2054	    SATA_TRAN_ACCEPTED) {
2055#ifdef SGPIO_SUPPORT
2056		nv_sgp_drive_active(nvp->nvp_ctlp,
2057		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2058#endif
2059		nv_slotp->nvslot_stime = ddi_get_lbolt();
2060
2061		/*
2062		 * start timer if it's not already running and this packet
2063		 * is not requesting polled mode.
2064		 */
2065		if ((nvp->nvp_timeout_id == 0) &&
2066		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2067			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2068			    drv_usectohz(NV_ONE_SEC));
2069		}
2070
2071		return (SATA_TRAN_ACCEPTED);
2072	}
2073
2074	fail:
2075
2076	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2077
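	/*
	 * The command was not accepted, so undo the slot and counter
	 * bookkeeping performed above.
	 */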
2078	if (ncq == NVSLOT_NCQ) {
2079		nvp->nvp_ncq_run--;
2080		nvp->nvp_sactive_cache &= ~on_bit;
2081	} else {
2082		nvp->nvp_non_ncq_run--;
2083	}
2084	nv_slotp->nvslot_spkt = NULL;
2085	nv_slotp->nvslot_flags = 0;
2086
2087	return (ret);
2088}
2089
2090
2091/*
2092 * Check if the signature is ready and, if non-zero, translate
2093 * it into a Solaris sata defined type.
2094 */
2095static void
2096nv_read_signature(nv_port_t *nvp)
2097{
2098	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2099
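	/*
	 * The 32-bit signature is assembled from the sector count (low
	 * byte), sector number, cylinder low and cylinder high (high byte)
	 * task file registers.
	 */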
2100	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2101	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2102	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2103	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2104
2105	switch (nvp->nvp_signature) {
2106
2107	case NV_SIG_DISK:
2108		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2109		nvp->nvp_type = SATA_DTYPE_ATADISK;
2110		break;
2111	case NV_SIG_ATAPI:
2112		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2113		    "drive is an optical device"));
2114		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2115		break;
2116	case NV_SIG_PM:
2117		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2118		    "device is a port multiplier"));
2119		nvp->nvp_type = SATA_DTYPE_PMULT;
2120		break;
2121	case NV_SIG_NOTREADY:
2122		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2123		    "signature not ready"));
2124		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2125		break;
2126	default:
2127		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2128		    " recognized", nvp->nvp_signature);
2129		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2130		break;
2131	}
2132
2133	if (nvp->nvp_signature) {
2134		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2135	}
2136}
2137
2138
2139/*
2140 * Reset the port
2141 */
2142static void
2143nv_reset(nv_port_t *nvp)
2144{
2145	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2146	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2147	nv_ctl_t *nvc = nvp->nvp_ctlp;
2148	uint32_t sctrl;
2149
2150	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2151
2152	ASSERT(mutex_owned(&nvp->nvp_mutex));
2153
2154	/*
2155	 * clear signature registers
2156	 */
2157	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2158	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2159	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2160	nv_put8(cmdhdl, nvp->nvp_count, 0);
2161
2162	nvp->nvp_signature = 0;
2163	nvp->nvp_type = 0;
2164	nvp->nvp_state |= NV_PORT_RESET;
2165	nvp->nvp_reset_time = ddi_get_lbolt();
2166	nvp->nvp_link_lost_time = 0;
2167
2168	/*
2169	 * assert reset in the PHY by setting SControl DET to COMRESET
2170	 */
2171	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2172
2173	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2174
2175	/*
2176	 * wait 1ms
2177	 */
2178	drv_usecwait(1000);
2179
2180	/*
2181	 * de-assert reset in PHY
2182	 */
2183	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2184
2185	/*
2186	 * make sure timer is running
2187	 */
2188	if (nvp->nvp_timeout_id == 0) {
2189		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2190		    drv_usectohz(NV_ONE_SEC));
2191	}
2192}
2193
2194
2195/*
2196 * Initialize register handling specific to mcp51/mcp55
2197 */
2198/* ARGSUSED */
2199static void
2200mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2201{
2202	nv_port_t *nvp;
2203	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2204	uint8_t off, port;
#ifdef NCQ
	uint32_t flags;
#endif
2205
2206	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2207	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2208
2209	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2210		nvp = &(nvc->nvc_port[port]);
2211		nvp->nvp_mcp5x_int_status =
2212		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2213		nvp->nvp_mcp5x_int_ctl =
2214		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2215
2216		/*
2217		 * clear any previous interrupts asserted
2218		 */
2219		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2220		    MCP5X_INT_CLEAR);
2221
2222		/*
2223		 * These are the interrupts to accept for now.  The spec
2224		 * says these are enable bits, but nvidia has indicated
2225		 * these are masking bits.  Even though they may be masked
2226		 * out to prevent asserting the main interrupt, they can
2227		 * still be asserted while reading the interrupt status
2228		 * register, so that needs to be considered in the interrupt
2229		 * handler.
2230		 */
2231		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2232		    ~(MCP5X_INT_IGNORE));
2233	}
2234
2235	/*
2236	 * Allow the driver to program the BM on the first command instead
2237	 * of waiting for an interrupt.
2238	 */
2239#ifdef NCQ
2240	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2241	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2242	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2243	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2244#endif
2245
2246	/*
2247	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2248	 * Enable DMA to take advantage of that.
2249	 *
2250	 */
2251	if (nvc->nvc_revid >= 0xa3) {
2252		if (nv_sata_40bit_dma == B_TRUE) {
2253			uint32_t reg32;
2254			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2255			    "rev id is %X and"
2256			    " is capable of 40-bit addressing",
2257			    nvc->nvc_revid));
2258			buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
2259			reg32 = pci_config_get32(pci_conf_handle,
2260			    NV_SATA_CFG_20);
2261			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2262			    reg32 | NV_40BIT_PRD);
2263		} else {
2264			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2265			    "40-bit dma disabled by nv_sata_40bit_dma"));
2266		}
2267	} else {
2268		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
2269		    "not capable of 40-bit addressing", nvc->nvc_revid));
2270	}
2271}
2272
2273
2274/*
2275 * Initialize register handling specific to ck804
2276 */
2277static void
2278ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2279{
2280	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2281	uint32_t reg32;
2282	uint16_t reg16;
2283	nv_port_t *nvp;
2284	int j;
2285
2286	/*
2287	 * delay hotplug interrupts until PHYRDY.
2288	 */
2289	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2290	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2291	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2292
2293	/*
2294	 * enable hot plug interrupts for channel x and y
2295	 */
2296	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2297	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2298	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2299	    NV_HIRQ_EN | reg16);
2300
2301
2302	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2303	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2304	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2305	    NV_HIRQ_EN | reg16);
2306
2307	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2308
2309	/*
2310	 * clear any existing interrupt pending then enable
2311	 */
2312	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2313		nvp = &(nvc->nvc_port[j]);
2314		mutex_enter(&nvp->nvp_mutex);
2315		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2316		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2317		mutex_exit(&nvp->nvp_mutex);
2318	}
2319}
2320
2321
2322/*
2323 * Initialize the controller and set up driver data structures.
2324 * determine if ck804 or mcp5x class.
2325 */
2326static int
2327nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2328{
2329	struct sata_hba_tran stran;
2330	nv_port_t *nvp;
2331	int j, ck804;
2332	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2333	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2334	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2335	uint32_t reg32;
2336	uint8_t reg8, reg8_save;
2337
2338	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2339
2340	ck804 = B_TRUE;
2341#ifdef SGPIO_SUPPORT
2342	nvc->nvc_mcp5x_flag = B_FALSE;
2343#endif
2344
2345	/*
2346	 * Need to set bit 2 to 1 at config offset 0x50
2347	 * to enable access to the bar5 registers.
2348	 */
2349	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2350	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2351	    reg32 | NV_BAR5_SPACE_EN);
2352
2353	/*
2354	 * Determine if this is ck804 or mcp5x.  ck804 maps the task file
2355	 * registers into bar5 while mcp5x does not.  The offset of the task
2356	 * file registers in mcp5x's space is unused.  So check one of the
2357	 * task file registers to see if it is writable and reads back what
2358	 * was written.  ck804 will return the value written, whereas mcp5x
2359	 * will not.
2360	 */
2361	reg8_save = nv_get8(bar5_hdl,
2362	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2363
2364
2365	for (j = 1; j < 3; j++) {
2366
2367		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2368		reg8 = nv_get8(bar5_hdl,
2369		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2370
2371		if (reg8 != j) {
2372			ck804 = B_FALSE;
2373			nvc->nvc_mcp5x_flag = B_TRUE;
2374			break;
2375		}
2376	}
2377
2378	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2379
2380	if (ck804 == B_TRUE) {
2381		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2382		nvc->nvc_interrupt = ck804_intr;
2383		nvc->nvc_reg_init = ck804_reg_init;
2384		nvc->nvc_set_intr = ck804_set_intr;
2385	} else {
2386		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2387		nvc->nvc_interrupt = mcp5x_intr;
2388		nvc->nvc_reg_init = mcp5x_reg_init;
2389		nvc->nvc_set_intr = mcp5x_set_intr;
2390	}
2391
2392
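	/*
	 * Describe this HBA to the sata framework: entry points, DMA
	 * attributes, queue depth and supported features.
	 */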
2393	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2394	stran.sata_tran_hba_dip = nvc->nvc_dip;
2395	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2396	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2397	stran.sata_tran_hba_features_support =
2398	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2399	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2400	stran.sata_tran_probe_port = nv_sata_probe;
2401	stran.sata_tran_start = nv_sata_start;
2402	stran.sata_tran_abort = nv_sata_abort;
2403	stran.sata_tran_reset_dport = nv_sata_reset;
2404	stran.sata_tran_selftest = NULL;
2405	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2406	stran.sata_tran_pwrmgt_ops = NULL;
2407	stran.sata_tran_ioctl = NULL;
2408	nvc->nvc_sata_hba_tran = stran;
2409
2410	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2411	    KM_SLEEP);
2412
2413	/*
2414	 * initialize registers common to all chipsets
2415	 */
2416	nv_common_reg_init(nvc);
2417
2418	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2419		nvp = &(nvc->nvc_port[j]);
2420
2421		cmd_addr = nvp->nvp_cmd_addr;
2422		ctl_addr = nvp->nvp_ctl_addr;
2423		bm_addr = nvp->nvp_bm_addr;
2424
2425		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2426		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2427
2428		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2429
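		/*
		 * Cache the addresses of the individual task file, device
		 * control and bus master registers for this port.
		 */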
2430		nvp->nvp_data	= cmd_addr + NV_DATA;
2431		nvp->nvp_error	= cmd_addr + NV_ERROR;
2432		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2433		nvp->nvp_count	= cmd_addr + NV_COUNT;
2434		nvp->nvp_sect	= cmd_addr + NV_SECT;
2435		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2436		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2437		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2438		nvp->nvp_status	= cmd_addr + NV_STATUS;
2439		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2440		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2441		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2442
2443		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2444		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2445		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2446
2447		nvp->nvp_state = 0;
2448	}
2449
2450	/*
2451	 * initialize register by calling chip specific reg initialization
2452	 */
2453	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2454
2455	return (NV_SUCCESS);
2456}
2457
2458
2459/*
2460 * Initialize data structures with enough slots to handle queuing, if
2461 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2462 * NCQ support is built into the driver and enabled.  It might have been
2463 * better to derive the true size from the drive itself, but the sata
2464 * module only sends down that information on the first NCQ command,
2465 * which means possibly re-sizing the structures on an interrupt stack,
2466 * making error handling more messy.  The easy way is to just allocate
2467 * all 32 slots, which is what most drives support anyway.
2468 */
2469static int
2470nv_init_port(nv_port_t *nvp)
2471{
2472	nv_ctl_t *nvc = nvp->nvp_ctlp;
2473	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2474	dev_info_t *dip = nvc->nvc_dip;
2475	ddi_device_acc_attr_t dev_attr;
2476	size_t buf_size;
2477	ddi_dma_cookie_t cookie;
2478	uint_t count;
2479	int rc, i;
2480
2481	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2482	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2483	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2484
2485	if (nvp->nvp_state & NV_PORT_INIT) {
2486		NVLOG((NVDBG_INIT, nvc, nvp,
2487		    "nv_init_port previously initialized"));
2488
2489		return (NV_SUCCESS);
2490	} else {
2491		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2492	}
2493
2494	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2495	    NV_QUEUE_SLOTS, KM_SLEEP);
2496
2497	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2498	    NV_QUEUE_SLOTS, KM_SLEEP);
2499
2500	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2501	    NV_QUEUE_SLOTS, KM_SLEEP);
2502
2503	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2504	    NV_QUEUE_SLOTS, KM_SLEEP);
2505
2506	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2507	    KM_SLEEP);
2508
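	/*
	 * For each command slot, allocate a PRD (scatter/gather) table in
	 * DMA-accessible memory and record its physical address so it can
	 * later be programmed into the bus master engine.
	 */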
2509	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2510
2511		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2512		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2513
2514		if (rc != DDI_SUCCESS) {
2515			nv_uninit_port(nvp);
2516
2517			return (NV_FAILURE);
2518		}
2519
2520		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2521		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2522		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2523		    &(nvp->nvp_sg_acc_hdl[i]));
2524
2525		if (rc != DDI_SUCCESS) {
2526			nv_uninit_port(nvp);
2527
2528			return (NV_FAILURE);
2529		}
2530
2531		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2532		    nvp->nvp_sg_addr[i], buf_size,
2533		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2534		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2535
2536		if (rc != DDI_DMA_MAPPED) {
2537			nv_uninit_port(nvp);
2538
2539			return (NV_FAILURE);
2540		}
2541
2542		ASSERT(count == 1);
2543		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2544
2545		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2546
2547		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2548	}
2549
2550	/*
2551	 * nvp_queue_depth represents the actual drive queue depth, not the
2552	 * number of slots allocated in the structures (which may be more).
2553	 * Actual queue depth is only learned after the first NCQ command, so
2554	 * initialize it to 1 for now.
2555	 */
2556	nvp->nvp_queue_depth = 1;
2557
2558	nvp->nvp_state |= NV_PORT_INIT;
2559
2560	return (NV_SUCCESS);
2561}
2562
2563
2564/*
2565 * Free dynamically allocated structures for port.
2566 */
2567static void
2568nv_uninit_port(nv_port_t *nvp)
2569{
2570	int i;
2571
2572	/*
2573	 * It is possible to reach here before a port has been initialized or
2574	 * after it has already been uninitialized.  Just return in that case.
2575	 */
2576	if (nvp->nvp_slot == NULL) {
2577
2578		return;
2579	}
2580
2581	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2582	    "nv_uninit_port uninitializing"));
2583
2584	nvp->nvp_type = SATA_DTYPE_NONE;
2585
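	/*
	 * Release the per-slot DMA resources in the reverse order of their
	 * allocation in nv_init_port.
	 */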
2586	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2587		if (nvp->nvp_sg_paddr[i]) {
2588			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2589		}
2590
2591		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2592			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2593		}
2594
2595		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2596			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2597		}
2598	}
2599
2600	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2601	nvp->nvp_slot = NULL;
2602
2603	kmem_free(nvp->nvp_sg_dma_hdl,
2604	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2605	nvp->nvp_sg_dma_hdl = NULL;
2606
2607	kmem_free(nvp->nvp_sg_acc_hdl,
2608	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2609	nvp->nvp_sg_acc_hdl = NULL;
2610
2611	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2612	nvp->nvp_sg_addr = NULL;
2613
2614	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2615	nvp->nvp_sg_paddr = NULL;
2616
2617	nvp->nvp_state &= ~NV_PORT_INIT;
2618	nvp->nvp_signature = 0;
2619}
2620
2621
2622/*
2623 * Cache register offsets and access handles to frequently accessed registers
2624 * that are common to both chipsets.
2625 */
2626static void
2627nv_common_reg_init(nv_ctl_t *nvc)
2628{
2629	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2630	uchar_t *bm_addr_offset, *sreg_offset;
2631	uint8_t bar, port;
2632	nv_port_t *nvp;
2633
2634	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
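		/*
		 * port 0 uses BAR 0/1 for the task file and control
		 * registers and the channel 0 SCR block in bar5; port 1
		 * uses BAR 2/3 and the channel 1 SCR block.  Both ports
		 * share BAR 4 for the bus master registers, with port 1
		 * at an offset of 8.
		 */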
2635		if (port == 0) {
2636			bar = NV_BAR_0;
2637			bm_addr_offset = 0;
2638			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2639		} else {
2640			bar = NV_BAR_2;
2641			bm_addr_offset = (uchar_t *)8;
2642			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2643		}
2644
2645		nvp = &(nvc->nvc_port[port]);
2646		nvp->nvp_ctlp = nvc;
2647		nvp->nvp_port_num = port;
2648		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2649
2650		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2651		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2652		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2653		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2654		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2655		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2656		    (long)bm_addr_offset;
2657
2658		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2659		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2660		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2661		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2662	}
2663}
2664
2665
2666static void
2667nv_uninit_ctl(nv_ctl_t *nvc)
2668{
2669	int port;
2670	nv_port_t *nvp;
2671
2672	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2673
2674	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2675		nvp = &(nvc->nvc_port[port]);
2676		mutex_enter(&nvp->nvp_mutex);
2677		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2678		nv_uninit_port(nvp);
2679		mutex_exit(&nvp->nvp_mutex);
2680		mutex_destroy(&nvp->nvp_mutex);
2681		cv_destroy(&nvp->nvp_poll_cv);
2682	}
2683
2684	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2685	nvc->nvc_port = NULL;
2686}
2687
2688
2689/*
2690 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2691 * that interrupts from other devices can be disregarded while dtracing.
2692 */
2693/* ARGSUSED */
2694static uint_t
2695ck804_intr(caddr_t arg1, caddr_t arg2)
2696{
2697	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2698	uint8_t intr_status;
2699	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2700
2701	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2702
2703	if (intr_status == 0) {
2704
2705		return (DDI_INTR_UNCLAIMED);
2706	}
2707
2708	ck804_intr_process(nvc, intr_status);
2709
2710	return (DDI_INTR_CLAIMED);
2711}
2712
2713
2714/*
2715 * Main interrupt handler for ck804.  handles normal device
2716 * interrupts as well as port hot plug and remove interrupts.
2717 *
2718 */
2719static void
2720ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2721{
2722
2723	int port, i;
2724	nv_port_t *nvp;
2725	nv_slot_t *nv_slotp;
2726	uchar_t	status;
2727	sata_pkt_t *spkt;
2728	uint8_t bmstatus, clear_bits;
2729	ddi_acc_handle_t bmhdl;
2730	int nvcleared = 0;
2731	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2732	uint32_t sstatus;
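	/*
	 * per-port hot plug and power management interrupt status bits;
	 * index 0 is the primary device, index 1 the secondary device.
	 */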
2733	int port_mask_hot[] = {
2734		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2735	};
2736	int port_mask_pm[] = {
2737		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2738	};
2739
2740	NVLOG((NVDBG_INTR, nvc, NULL,
2741	    "ck804_intr_process entered intr_status=%x", intr_status));
2742
2743	/*
2744	 * For a command completion interrupt, an explicit clear is not
2745	 * required; for the error cases, however, it is performed.
2746	 */
2747	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2748
2749		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
2750
2751		if ((port_mask[port] & intr_status) == 0) {
2752			continue;
2753		}
2754
2755		NVLOG((NVDBG_INTR, nvc, NULL,
2756		    "ck804_intr_process interrupt on port %d", port));
2757
2758		nvp = &(nvc->nvc_port[port]);
2759
2760		mutex_enter(&nvp->nvp_mutex);
2761
2762		/*
2763		 * there was a corner case found where an interrupt
2764		 * arrived before nvp_slot was set.  We should probably
2765		 * track down why that happens, try to eliminate that
2766		 * source, and then get rid of this
2767		 * check.
2768		 */
2769		if (nvp->nvp_slot == NULL) {
2770			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2771			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2772			    "received before initialization "
2773			    "completed status=%x", status));
2774			mutex_exit(&nvp->nvp_mutex);
2775
2776			/*
2777			 * clear interrupt bits
2778			 */
2779			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2780			    port_mask[port]);
2781
2782			continue;
2783		}
2784
2785		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2786			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2787			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2788			    " no command in progress status=%x", status));
2789			mutex_exit(&nvp->nvp_mutex);
2790
2791			/*
2792			 * clear interrupt bits
2793			 */
2794			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2795			    port_mask[port]);
2796
2797			continue;
2798		}
2799
2800		bmhdl = nvp->nvp_bm_hdl;
2801		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2802
2803		if (!(bmstatus & BMISX_IDEINTS)) {
2804			mutex_exit(&nvp->nvp_mutex);
2805
2806			continue;
2807		}
2808
2809		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2810
2811		if (status & SATA_STATUS_BSY) {
2812			mutex_exit(&nvp->nvp_mutex);
2813
2814			continue;
2815		}
2816
2817		nv_slotp = &(nvp->nvp_slot[0]);
2818
2819		ASSERT(nv_slotp);
2820
2821		spkt = nv_slotp->nvslot_spkt;
2822
2823		if (spkt == NULL) {
2824			mutex_exit(&nvp->nvp_mutex);
2825
2826			continue;
2827		}
2828
2829		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2830
2831		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2832
2833		/*
2834		 * If there is no link, the completion of the packet cannot be
2835		 * determined with certainty, so abort it.
2836		 */
2837		if (nv_check_link((&spkt->satapkt_device)->
2838		    satadev_scr.sstatus) == B_FALSE) {
2839
2840			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2841
2842		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2843
2844			nv_complete_io(nvp, spkt, 0);
2845		}
2846
2847		mutex_exit(&nvp->nvp_mutex);
2848	}
2849
2850	/*
2851	 * ck804 often doesn't correctly distinguish hot add/remove
2852	 * interrupts.  Frequently both the ADD and the REMOVE bits
2853	 * are asserted, whether it was a remove or add.  Use sstatus
2854	 * to distinguish hot add from hot remove.
2855	 */
2856
2857	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2858		clear_bits = 0;
2859
2860		nvp = &(nvc->nvc_port[port]);
2861		mutex_enter(&nvp->nvp_mutex);
2862
2863		if ((port_mask_pm[port] & intr_status) != 0) {
2864			clear_bits = port_mask_pm[port];
2865			NVLOG((NVDBG_HOT, nvc, nvp,
2866			    "clearing PM interrupt bit: %x",
2867			    intr_status & port_mask_pm[port]));
2868		}
2869
2870		if ((port_mask_hot[port] & intr_status) == 0) {
2871			if (clear_bits != 0) {
2872				goto clear;
2873			} else {
2874				mutex_exit(&nvp->nvp_mutex);
2875				continue;
2876			}
2877		}
2878
2879		/*
2880		 * reaching here means there was a hot add or remove.
2881		 */
2882		clear_bits |= port_mask_hot[port];
2883
2884		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2885
2886		sstatus = nv_get32(bar5_hdl,
2887		    nvc->nvc_port[port].nvp_sstatus);
2888
2889		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2890		    SSTATUS_DET_DEVPRE_PHYCOM) {
2891			nv_report_add_remove(nvp, 0);
2892		} else {
2893			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2894		}
2895	clear:
2896		/*
2897		 * clear interrupt bits.  explicit interrupt clear is
2898		 * required for hotplug interrupts.
2899		 */
2900		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
2901
2902		/*
2903		 * make sure it's flushed and cleared.  If not, try
2904		 * again.  Sometimes it has been observed not to clear
2905		 * on the first try.
2906		 */
2907		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2908
2909		/*
2910		 * make 10 additional attempts to clear the interrupt
2911		 */
2912		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2913			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2914			    "still not clear try=%d", intr_status,
2915			    ++nvcleared));
2916			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2917			    clear_bits);
2918			intr_status = nv_get8(bar5_hdl,
2919			    nvc->nvc_ck804_int_status);
2920		}
2921
2922		/*
2923		 * if still not clear, log a message and disable the
2924		 * port. highly unlikely that this path is taken, but it
2925		 * gives protection against a wedged interrupt.
2926		 */
2927		if (intr_status & clear_bits) {
2928			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2929			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2930			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2931			nvp->nvp_state |= NV_PORT_FAILED;
2932			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2933			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2934			    "interrupt.  disabling port intr_status=%X",
2935			    intr_status);
2936		}
2937
2938		mutex_exit(&nvp->nvp_mutex);
2939	}
2940}
2941
2942
2943/*
2944 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
2945 * on the controller, to handle completion and hot plug and remove events.
2946 *
2947 */
2948static uint_t
2949mcp5x_intr_port(nv_port_t *nvp)
2950{
2951	nv_ctl_t *nvc = nvp->nvp_ctlp;
2952	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2953	uint8_t clear = 0, intr_cycles = 0;
2954	int ret = DDI_INTR_UNCLAIMED;
2955	uint16_t int_status;
2956
2957	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port entered"));
2958
2959	for (;;) {
2960		/*
2961		 * read current interrupt status
2962		 */
2963		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
2964
2965		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2966
2967		/*
2968		 * MCP5X_INT_IGNORE interrupts will show up in the status,
2969		 * but are masked out from causing an interrupt to be generated
2970		 * to the processor.  Ignore them here by masking them out.
2971		 */
2972		int_status &= ~(MCP5X_INT_IGNORE);
2973
2974		/*
2975		 * exit the loop when no more interrupts to process
2976		 */
2977		if (int_status == 0) {
2978
2979			break;
2980		}
2981
2982		if (int_status & MCP5X_INT_COMPLETE) {
2983			NVLOG((NVDBG_INTR, nvc, nvp,
2984			    "mcp5x_packet_complete_intr"));
2985			/*
2986			 * since int_status was set, return DDI_INTR_CLAIMED
2987			 * from the DDI's perspective even though the packet
2988			 * completion may not have succeeded.  If it fails, the
2989			 * interrupt needs to be cleared manually; otherwise
2990			 * clearing is implicit.
2991			 */
2992			ret = DDI_INTR_CLAIMED;
2993			if (mcp5x_packet_complete_intr(nvc, nvp) ==
2994			    NV_FAILURE) {
2995				clear = MCP5X_INT_COMPLETE;
2996			} else {
2997				intr_cycles = 0;
2998			}
2999		}
3000
3001		if (int_status & MCP5X_INT_DMA_SETUP) {
3002			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3003
3004			/*
3005			 * Needs to be cleared before starting the BM, so do it
3006			 * now.  make sure this is still working.
3007			 */
3008			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3009			    MCP5X_INT_DMA_SETUP);
3010#ifdef NCQ
3011			ret = mcp5x_dma_setup_intr(nvc, nvp);
3012#endif
3013		}
3014
3015		if (int_status & MCP5X_INT_REM) {
3016			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x device removed"));
3017			clear = MCP5X_INT_REM;
3018			ret = DDI_INTR_CLAIMED;
3019
3020			mutex_enter(&nvp->nvp_mutex);
3021			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3022			mutex_exit(&nvp->nvp_mutex);
3023
3024		} else if (int_status & MCP5X_INT_ADD) {
3025			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3026			clear = MCP5X_INT_ADD;
3027			ret = DDI_INTR_CLAIMED;
3028
3029			mutex_enter(&nvp->nvp_mutex);
3030			nv_report_add_remove(nvp, 0);
3031			mutex_exit(&nvp->nvp_mutex);
3032		}
3033
3034		if (clear) {
3035			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3036			clear = 0;
3037		}
3038
3039		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3040			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3041			    "processing.  Disabling port int_status=%X"
3042			    " clear=%X", int_status, clear);
3043			mutex_enter(&nvp->nvp_mutex);
3044			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3045			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3046			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3047			nvp->nvp_state |= NV_PORT_FAILED;
3048			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3049			mutex_exit(&nvp->nvp_mutex);
3050		}
3051	}
3052
3053	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port: finished ret=%d", ret));
3054
3055	return (ret);
3056}
3057
3058
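/*
 * mcp5x interrupt wrapper.  Service both ports and claim the interrupt
 * if either port had work to do.
 */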
3059/* ARGSUSED */
3060static uint_t
3061mcp5x_intr(caddr_t arg1, caddr_t arg2)
3062{
3063	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3064	int ret;
3065
3066	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3067	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3068
3069	return (ret);
3070}
3071
3072
3073#ifdef NCQ
3074/*
3075 * with software driven NCQ on mcp5x, an interrupt occurs right
3076 * before the drive is ready to do a DMA transfer.  At this point,
3077 * the PRD table needs to be programmed and the DMA engine enabled
3078 * and ready to go.
3079 *
3080 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3081 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3082 * -- clear bit 0 of master command reg
3083 * -- program PRD
3084 * -- clear the interrupt status bit for the DMA Setup FIS
3085 * -- set bit 0 of the bus master command register
3086 */
3087static int
3088mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3089{
3090	int slot;
3091	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3092	uint8_t bmicx;
3093	int port = nvp->nvp_port_num;
3094	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3095	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3096
3097	nv_cmn_err(CE_PANIC, nvc, nvp,
3098	    "this should not be executed at all until NCQ");
3099
3100	mutex_enter(&nvp->nvp_mutex);
3101
3102	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3103
3104	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3105
3106	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3107	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3108
3109	/*
3110	 * halt the DMA engine.  This step is necessary according to
3111	 * the mcp5x spec, probably since there may have been a "first" packet
3112	 * that already programmed the DMA engine, but may not turn out to
3113	 * be the first one processed.
3114	 */
3115	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3116
3117#if 0
3118	if (bmicx & BMICX_SSBM) {
3119		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3120		    "another packet.  Cancelling and reprogramming"));
3121		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3122	}
3123#endif
3124	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3125
3126	nv_start_dma_engine(nvp, slot);
3127
3128	mutex_exit(&nvp->nvp_mutex);
3129
3130	return (DDI_INTR_CLAIMED);
3131}
3132#endif /* NCQ */
3133
3134
3135/*
3136 * packet completion interrupt.  If the packet is complete, invoke
3137 * the packet completion callback.
3138 */
3139static int
3140mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3141{
3142	uint8_t status, bmstatus;
3143	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3144	int sactive;
3145	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3146	sata_pkt_t *spkt;
3147	nv_slot_t *nv_slotp;
3148
3149	mutex_enter(&nvp->nvp_mutex);
3150
3151	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3152
3153	if (!(bmstatus & BMISX_IDEINTS)) {
3154		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3155		mutex_exit(&nvp->nvp_mutex);
3156
3157		return (NV_FAILURE);
3158	}
3159
3160	/*
3161	 * If the just completed item is a non-ncq command, the busy
3162	 * bit should not be set
3163	 */
3164	if (nvp->nvp_non_ncq_run) {
3165		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3166		if (status & SATA_STATUS_BSY) {
3167			nv_cmn_err(CE_WARN, nvc, nvp,
3168			    "unexpected SATA_STATUS_BSY set");
3169			mutex_exit(&nvp->nvp_mutex);
3170			/*
3171			 * calling function will clear interrupt.  then
3172			 * the real interrupt will either arrive or the
3173			 * packet timeout handling will take over and
3174			 * reset.
3175			 */
3176			return (NV_FAILURE);
3177		}
3178
3179	} else {
3180		/*
3181		 * NCQ: check for BSY here and wait if still busy before
3182		 * continuing.  Rather than waiting for it to clear when
3183		 * starting a packet and wasting CPU time, the starting
3184		 * thread can exit immediately, but it might have to spin
3185		 * here for a bit.  Needs more work and experimentation.
3186		 */
3187		ASSERT(nvp->nvp_ncq_run);
3188	}
3189
3190
3191	if (nvp->nvp_ncq_run) {
3192		ncq_command = B_TRUE;
3193		ASSERT(nvp->nvp_non_ncq_run == 0);
3194	} else {
3195		ASSERT(nvp->nvp_non_ncq_run != 0);
3196	}
3197
3198	/*
3199	 * active_pkt_bit will represent the bitmap of the single completed
3200	 * packet.  Because of the nature of sw assisted NCQ, only one
3201	 * command will complete per interrupt.
3202	 */
3203
3204	if (ncq_command == B_FALSE) {
3205		active_pkt = 0;
3206	} else {
3207		/*
3208		 * NCQ: determine which command just completed, by examining
3209		 * which bit cleared in the register since last written.
3210		 */
3211		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3212
3213		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3214
3215		ASSERT(active_pkt_bit);
3216
3217
3218		/*
3219		 * this failure path needs more work to handle the
3220		 * error condition and recovery.
3221		 */
3222		if (active_pkt_bit == 0) {
3223			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3224
3225			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3226			    "nvp->nvp_sactive %X", sactive,
3227			    nvp->nvp_sactive_cache);
3228
3229			(void) nv_get8(cmdhdl, nvp->nvp_status);
3230
3231			mutex_exit(&nvp->nvp_mutex);
3232
3233			return (NV_FAILURE);
3234		}
3235
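		/*
		 * Convert the single set bit into a slot index by counting
		 * how far it must be shifted right to reach bit 0.
		 */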
3236		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3237		    active_pkt++, active_pkt_bit >>= 1) {
3238		}
3239
3240		/*
3241		 * make sure only one bit is ever turned on
3242		 */
3243		ASSERT(active_pkt_bit == 1);
3244
3245		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3246	}
3247
3248	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3249
3250	spkt = nv_slotp->nvslot_spkt;
3251
3252	ASSERT(spkt != NULL);
3253
3254	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3255
3256	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3257
3258	/*
3259	 * If there is no link, the completion of the packet cannot be
3260	 * determined with certainty, so abort it.
3261	 */
3262	if (nv_check_link((&spkt->satapkt_device)->
3263	    satadev_scr.sstatus) == B_FALSE) {
3264		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3265
3266	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3267
3268		nv_complete_io(nvp, spkt, active_pkt);
3269	}
3270
3271	mutex_exit(&nvp->nvp_mutex);
3272
3273	return (NV_SUCCESS);
3274}
3275
3276
3277static void
3278nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3279{
3280
3281	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3282
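	/*
	 * Account for the completed command against the appropriate
	 * outstanding command counter (NCQ or non-NCQ).
	 */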
3283	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3284		nvp->nvp_ncq_run--;
3285	} else {
3286		nvp->nvp_non_ncq_run--;
3287	}
3288
3289	/*
3290	 * Mark the packet slot idle before calling satapkt_comp so that
3291	 * the slot can be reused by the time the completion callback runs.
3292	 */
3293	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3294
3295	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3296		/*
3297		 * If this is not a polled mode cmd, which has an active
3298		 * thread monitoring for completion, then the sleeping
3299		 * thread needs to be signalled that the cmd is complete.
3300		 */
3301		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3302			cv_signal(&nvp->nvp_poll_cv);
3303		}
3304
3305		return;
3306	}
3307
3308	if (spkt->satapkt_comp != NULL) {
3309		mutex_exit(&nvp->nvp_mutex);
3310		(*spkt->satapkt_comp)(spkt);
3311		mutex_enter(&nvp->nvp_mutex);
3312	}
3313}
3314
3315
3316/*
3317 * Check whether the packet is an NCQ command.  For an NCQ command,
3318 * start it if there is still room on the queue.  For a non-NCQ command,
3319 * only start it if no other command is running.
3320 */
3321static int
3322nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3323{
3324	uint8_t cmd, ncq;
3325
3326	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3327
3328	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3329
3330	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3331	    (cmd == SATAC_READ_FPDMA_QUEUED));
3332
3333	if (ncq == B_FALSE) {
3334
3335		if ((nvp->nvp_non_ncq_run == 1) ||
3336		    (nvp->nvp_ncq_run > 0)) {
3337			/*
3338			 * next command is non-ncq which can't run
3339			 * concurrently.  exit and return queue full.
3340			 */
3341			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3342
3343			return (SATA_TRAN_QUEUE_FULL);
3344		}
3345
3346		return (nv_start_common(nvp, spkt));
3347	}
3348
3349	/*
3350	 * ncq == B_TRUE
3351	 */
3352	if (nvp->nvp_non_ncq_run == 1) {
3353		/*
3354		 * cannot start any NCQ commands when there
3355		 * is a non-NCQ command running.
3356		 */
3357		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3358
3359		return (SATA_TRAN_QUEUE_FULL);
3360	}
3361
3362#ifdef NCQ
3363	/*
3364	 * this is not compiled for now as satapkt_device.satadev_qdepth
3365	 * is being pulled out until NCQ support is later addressed
3366	 *
3367	 * nvp_queue_depth is initialized by the first NCQ command
3368	 * received.
3369	 */
3370	if (nvp->nvp_queue_depth == 1) {
3371		nvp->nvp_queue_depth =
3372		    spkt->satapkt_device.satadev_qdepth;
3373
3374		ASSERT(nvp->nvp_queue_depth > 1);
3375
3376		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3377		    "nv_process_queue: nvp_queue_depth set to %d",
3378		    nvp->nvp_queue_depth));
3379	}
3380#endif
3381
3382	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3383		/*
3384		 * max number of NCQ commands already active
3385		 */
3386		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3387
3388		return (SATA_TRAN_QUEUE_FULL);
3389	}
3390
3391	return (nv_start_common(nvp, spkt));
3392}
3393
3394
3395/*
3396 * configure INTx and legacy interrupts
3397 */
3398static int
3399nv_add_legacy_intrs(nv_ctl_t *nvc)
3400{
3401	dev_info_t	*devinfo = nvc->nvc_dip;
3402	int		actual, count = 0;
3403	int		x, y, rc, inum = 0;
3404
3405	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3406
3407	/*
3408	 * get number of interrupts
3409	 */
3410	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3411	if ((rc != DDI_SUCCESS) || (count == 0)) {
3412		NVLOG((NVDBG_INTR, nvc, NULL,
3413		    "ddi_intr_get_nintrs() failed, "
3414		    "rc %d count %d", rc, count));
3415
3416		return (DDI_FAILURE);
3417	}
3418
3419	/*
3420	 * allocate an array of interrupt handles
3421	 */
3422	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3423	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3424
3425	/*
3426	 * call ddi_intr_alloc()
3427	 */
3428	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3429	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3430
3431	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3432		nv_cmn_err(CE_WARN, nvc, NULL,
3433		    "ddi_intr_alloc() failed, rc %d", rc);
3434		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3435
3436		return (DDI_FAILURE);
3437	}
3438
3439	if (actual < count) {
3440		nv_cmn_err(CE_WARN, nvc, NULL,
3441		    "ddi_intr_alloc: requested: %d, received: %d",
3442		    count, actual);
3443
3444		goto failure;
3445	}
3446
3447	nvc->nvc_intr_cnt = actual;
3448
3449	/*
3450	 * get intr priority
3451	 */
3452	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3453	    DDI_SUCCESS) {
3454		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3455
3456		goto failure;
3457	}
3458
3459	/*
3460	 * Test for high level mutex
3461	 */
3462	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3463		nv_cmn_err(CE_WARN, nvc, NULL,
3464		    "nv_add_legacy_intrs: high level intr not supported");
3465
3466		goto failure;
3467	}
3468
3469	for (x = 0; x < actual; x++) {
3470		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3471		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3472			nv_cmn_err(CE_WARN, nvc, NULL,
3473			    "ddi_intr_add_handler() failed");
3474
3475			goto failure;
3476		}
3477	}
3478
3479	/*
3480	 * call ddi_intr_enable() for legacy interrupts
3481	 */
3482	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3483		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3484	}
3485
3486	return (DDI_SUCCESS);
3487
3488	failure:
3489	/*
3490	 * free allocated intr and nvc_htable
3491	 */
3492	for (y = 0; y < actual; y++) {
3493		(void) ddi_intr_free(nvc->nvc_htable[y]);
3494	}
3495
3496	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3497
3498	return (DDI_FAILURE);
3499}
3500
3501#ifdef	NV_MSI_SUPPORTED
3502/*
3503 * configure MSI interrupts
3504 */
3505static int
3506nv_add_msi_intrs(nv_ctl_t *nvc)
3507{
3508	dev_info_t	*devinfo = nvc->nvc_dip;
3509	int		count, avail, actual;
3510	int		x, y, rc, inum = 0;
3511
3512	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3513
3514	/*
3515	 * get number of interrupts
3516	 */
3517	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3518	if ((rc != DDI_SUCCESS) || (count == 0)) {
3519		nv_cmn_err(CE_WARN, nvc, NULL,
3520		    "ddi_intr_get_nintrs() failed, "
3521		    "rc %d count %d", rc, count);
3522
3523		return (DDI_FAILURE);
3524	}
3525
3526	/*
3527	 * get number of available interrupts
3528	 */
3529	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3530	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3531		nv_cmn_err(CE_WARN, nvc, NULL,
3532		    "ddi_intr_get_navail() failed, "
3533		    "rc %d avail %d", rc, avail);
3534
3535		return (DDI_FAILURE);
3536	}
3537
3538	if (avail < count) {
3539		nv_cmn_err(CE_WARN, nvc, NULL,
3540		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3541		    avail, count);
3542	}
3543
3544	/*
3545	 * allocate an array of interrupt handles
3546	 */
3547	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3548	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3549
3550	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3551	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3552
3553	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3554		nv_cmn_err(CE_WARN, nvc, NULL,
3555		    "ddi_intr_alloc() failed, rc %d", rc);
3556		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3557
3558		return (DDI_FAILURE);
3559	}
3560
3561	/*
3562	 * Use interrupt count returned or abort?
3563	 */
3564	if (actual < count) {
3565		NVLOG((NVDBG_INIT, nvc, NULL,
3566		    "Requested: %d, Received: %d", count, actual));
3567	}
3568
3569	nvc->nvc_intr_cnt = actual;
3570
3571	/*
3572	 * get priority for first msi, assume remaining are all the same
3573	 */
3574	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3575	    DDI_SUCCESS) {
3576		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3577
3578		goto failure;
3579	}
3580
3581	/*
3582	 * test for high level mutex
3583	 */
3584	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3585		nv_cmn_err(CE_WARN, nvc, NULL,
3586		    "nv_add_msi_intrs: high level intr not supported");
3587
3588		goto failure;
3589	}
3590
3591	/*
3592	 * Call ddi_intr_add_handler()
3593	 */
3594	for (x = 0; x < actual; x++) {
3595		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3596		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3597			nv_cmn_err(CE_WARN, nvc, NULL,
3598			    "ddi_intr_add_handler() failed");
3599
3600			goto failure;
3601		}
3602	}
3603
3604	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3605
3606	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3607		(void) ddi_intr_block_enable(nvc->nvc_htable,
3608		    nvc->nvc_intr_cnt);
3609	} else {
3610		/*
3611		 * Call ddi_intr_enable() for MSI non block enable
3612		 */
3613		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3614			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3615		}
3616	}
3617
3618	return (DDI_SUCCESS);
3619
3620	failure:
3621	/*
3622	 * free allocated intr and nvc_htable
3623	 */
3624	for (y = 0; y < actual; y++) {
3625		(void) ddi_intr_free(nvc->nvc_htable[y]);
3626	}
3627
3628	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3629
3630	return (DDI_FAILURE);
3631}
3632#endif
3633
3634
3635static void
3636nv_rem_intrs(nv_ctl_t *nvc)
3637{
3638	int x, i;
3639	nv_port_t *nvp;
3640
3641	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3642
3643	/*
3644	 * prevent controller from generating interrupts by
3645	 * masking them out.  This is an extra precaution.
3646	 */
3647	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3648		nvp = (&nvc->nvc_port[i]);
3649		mutex_enter(&nvp->nvp_mutex);
3650		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3651		mutex_exit(&nvp->nvp_mutex);
3652	}
3653
3654	/*
3655	 * disable all interrupts
3656	 */
3657	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3658	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3659		(void) ddi_intr_block_disable(nvc->nvc_htable,
3660		    nvc->nvc_intr_cnt);
3661	} else {
3662		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3663			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3664		}
3665	}
3666
3667	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3668		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3669		(void) ddi_intr_free(nvc->nvc_htable[x]);
3670	}
3671
3672	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3673}
3674
3675
3676/*
3677 * variable argument wrapper for cmn_err.  prefixes the instance and port
3678 * number if possible
3679 */
3680static void
3681nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3682{
3683	char port[NV_STRING_10];
3684	char inst[NV_STRING_10];
3685
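	/*
	 * nv_log_buf is shared by all callers, so serialize access to it
	 * with nv_log_mutex.
	 */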
3686	mutex_enter(&nv_log_mutex);
3687
3688	if (nvc) {
3689		(void) snprintf(inst, NV_STRING_10, "inst %d",
3690		    ddi_get_instance(nvc->nvc_dip));
3691	} else {
3692		inst[0] = '\0';
3693	}
3694
3695	if (nvp) {
3696		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3697	} else {
3698		port[0] = '\0';
3699	}
3700
3701	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3702	    (inst[0]|port[0] ? ": " :""));
3703
3704	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3705	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3706
3707	/*
3708	 * normally set to log to console but in some debug situations it
3709	 * may be useful to log only to a file.
3710	 */
3711	if (nv_log_to_console) {
3712		if (nv_prom_print) {
3713			prom_printf("%s\n", nv_log_buf);
3714		} else {
3715			cmn_err(ce, "%s", nv_log_buf);
3716		}
3717
3718
3719	} else {
3720		cmn_err(ce, "!%s", nv_log_buf);
3721	}
3722
3723	mutex_exit(&nv_log_mutex);
3724}
3725
3726
3727/*
3728 * wrapper for cmn_err
3729 */
3730static void
3731nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3732{
3733	va_list ap;
3734
3735	va_start(ap, fmt);
3736	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3737	va_end(ap);
3738}
3739
3740
3741#if defined(DEBUG)
3742/*
3743 * prefixes the instance and port number if possible to the debug message
3744 */
3745static void
3746nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3747{
3748	va_list ap;
3749
3750	if ((nv_debug_flags & flag) == 0) {
3751		return;
3752	}
3753
3754	va_start(ap, fmt);
3755	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3756	va_end(ap);
3757
3758	/*
3759	 * useful for some debugging situations
3760	 */
3761	if (nv_log_delay) {
3762		drv_usecwait(nv_log_delay);
3763	}
3764
3765}
3766#endif /* DEBUG */
3767
3768
3769/*
3770 * program registers which are common to all commands
3771 */
3772static void
3773nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3774{
3775	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3776	sata_pkt_t *spkt;
3777	sata_cmd_t *satacmd;
3778	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3779	uint8_t cmd, ncq = B_FALSE;
3780
3781	spkt = nv_slotp->nvslot_spkt;
3782	satacmd = &spkt->satapkt_cmd;
3783	cmd = satacmd->satacmd_cmd_reg;
3784
3785	ASSERT(nvp->nvp_slot);
3786
3787	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3788	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3789		ncq = B_TRUE;
3790	}
3791
3792	/*
3793	 * select the drive
3794	 */
3795	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3796
3797	/*
3798	 * make certain the drive is selected
3799	 */
3800	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3801	    NV_SEC2USEC(5), 0) == B_FALSE) {
3802
3803		return;
3804	}
3805
3806	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3807
3808	case ATA_ADDR_LBA:
3809		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3810
3811		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3812		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3813		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3814		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3815
3816		break;
3817
3818	case ATA_ADDR_LBA28:
3819		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3820		    "ATA_ADDR_LBA28 mode"));
3821		/*
3822		 * NCQ only uses 48-bit addressing
3823		 */
3824		ASSERT(ncq != B_TRUE);
3825
3826		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3827		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3828		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3829		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3830
3831		break;
3832
3833	case ATA_ADDR_LBA48:
3834		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3835		    "ATA_ADDR_LBA48 mode"));
3836
3837		/*
3838		 * for NCQ, tag goes into count register and real sector count
3839		 * into features register.  The sata module does the translation
3840		 * in the satacmd.
3841		 */
3842		if (ncq == B_TRUE) {
3843			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3844			nv_put8(cmdhdl, nvp->nvp_feature,
3845			    satacmd->satacmd_features_reg_ext);
3846			nv_put8(cmdhdl, nvp->nvp_feature,
3847			    satacmd->satacmd_features_reg);
3848		} else {
3849			nv_put8(cmdhdl, nvp->nvp_count,
3850			    satacmd->satacmd_sec_count_msb);
3851			nv_put8(cmdhdl, nvp->nvp_count,
3852			    satacmd->satacmd_sec_count_lsb);
3853		}
3854
3855		/*
3856		 * send the high-order half first
3857		 */
3858		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3859		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3860		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3861		/*
3862		 * Send the low-order half
3863		 */
3864		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3865		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3866		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3867
3868		break;
3869
3870	case 0:
3871		/*
3872		 * non-media access commands such as identify and features
3873		 * take this path.
3874		 */
3875		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3876		nv_put8(cmdhdl, nvp->nvp_feature,
3877		    satacmd->satacmd_features_reg);
3878		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3879		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3880		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3881
3882		break;
3883
3884	default:
3885		break;
3886	}
3887
3888	ASSERT(nvp->nvp_slot);
3889}
3890
3891
3892/*
3893 * start a command that involves no media access
3894 */
3895static int
3896nv_start_nodata(nv_port_t *nvp, int slot)
3897{
3898	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3899	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3900	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3901	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3902
3903	nv_program_taskfile_regs(nvp, slot);
3904
3905	/*
3906	 * This next one sets the controller in motion
3907	 */
3908	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3909
3910	return (SATA_TRAN_ACCEPTED);
3911}
3912
3913
3914int
3915nv_bm_status_clear(nv_port_t *nvp)
3916{
3917	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3918	uchar_t	status, ret;
3919
3920	/*
3921	 * Get the current BM status
3922	 */
3923	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3924
3925	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3926
3927	/*
3928	 * Clear the latches (and preserve the other bits)
3929	 */
3930	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3931
3932	return (ret);
3933}
3934
3935
3936/*
3937 * program the bus master DMA engine with the PRD address for
3938 * the active slot command, and start the DMA engine.
3939 */
3940static void
3941nv_start_dma_engine(nv_port_t *nvp, int slot)
3942{
3943	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3944	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3945	uchar_t direction;
3946
3947	ASSERT(nv_slotp->nvslot_spkt != NULL);
3948
3949	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3950	    == SATA_DIR_READ) {
3951		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3952	} else {
3953		direction = BMICX_RWCON_READ_FROM_MEMORY;
3954	}
3955
3956	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3957	    "nv_start_dma_engine entered"));
3958
3959	/*
3960	 * reset the controller's interrupt and error status bits
3961	 */
3962	(void) nv_bm_status_clear(nvp);
3963
3964	/*
3965	 * program the PRD table physical start address
3966	 */
3967	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3968
3969	/*
3970	 * set the direction control and start the DMA controller
3971	 */
3972	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3973}
3974
3975/*
3976 * start dma command, either in or out
3977 */
3978static int
3979nv_start_dma(nv_port_t *nvp, int slot)
3980{
3981	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3982	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3983	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3984	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3985	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3986#ifdef NCQ
3987	uint8_t ncq = B_FALSE;
3988#endif
3989	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3990	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3991	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3992	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3993
3994	ASSERT(sg_count != 0);
3995
3996	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3997		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3998		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3999		    sata_cmdp->satacmd_num_dma_cookies);
4000
4001		return (NV_FAILURE);
4002	}
4003
4004	nv_program_taskfile_regs(nvp, slot);
4005
4006	/*
4007	 * start the drive in motion
4008	 */
4009	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4010
4011	/*
4012	 * the drive starts processing the transaction when the cmd register
4013	 * is written.  This is done here before programming the DMA engine to
4014	 * parallelize and save some time.  In the event that the drive is ready
4015	 * before DMA, it will wait.
4016	 */
4017#ifdef NCQ
4018	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4019	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4020		ncq = B_TRUE;
4021	}
4022#endif
4023
4024	/*
	 * copy the PRD list to the PRD table in DMA accessible memory
	 * so that the controller can access it.
4027	 */
4028	for (idx = 0; idx < sg_count; idx++, srcp++) {
4029		uint32_t size;
4030
4031		ASSERT(srcp->dmac_size <= UINT16_MAX);
4032
4033		nv_put32(sghdl, dstp++, srcp->dmac_address);
4034
4035		size = srcp->dmac_size;
4036
4037		/*
		 * If this is a 40-bit address, copy bits 32-39 of the
		 * physical address into bits 16-23 of the PRD count.
4040		 */
4041		if (srcp->dmac_laddress > UINT32_MAX) {
4042			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4043		}
4044
4045		/*
4046		 * set the end of table flag for the last entry
4047		 */
4048		if (idx == (sg_count - 1)) {
4049			size |= PRDE_EOT;
4050		}
4051
4052		nv_put32(sghdl, dstp++, size);
4053	}
4054
4055	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4056	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4057
4058	nv_start_dma_engine(nvp, slot);
4059
4060#ifdef NCQ
4061	/*
4062	 * optimization:  for SWNCQ, start DMA engine if this is the only
4063	 * command running.  Preliminary NCQ efforts indicated this needs
4064	 * more debugging.
4065	 *
4066	 * if (nvp->nvp_ncq_run <= 1)
4067	 */
4068
4069	if (ncq == B_FALSE) {
4070		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4071		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4072		    " cmd = %X", non_ncq_commands++, cmd));
4073		nv_start_dma_engine(nvp, slot);
4074	} else {
4075		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4076		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4077	}
4078#endif /* NCQ */
4079
4080	return (SATA_TRAN_ACCEPTED);
4081}
4082
4083
4084/*
4085 * start a PIO data-in ATA command
4086 */
4087static int
4088nv_start_pio_in(nv_port_t *nvp, int slot)
4089{
4090
4091	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4092	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4093	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4094
4095	nv_program_taskfile_regs(nvp, slot);
4096
4097	/*
4098	 * This next one sets the drive in motion
4099	 */
4100	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4101
4102	return (SATA_TRAN_ACCEPTED);
4103}
4104
4105
4106/*
4107 * start a PIO data-out ATA command
4108 */
4109static int
4110nv_start_pio_out(nv_port_t *nvp, int slot)
4111{
4112	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4113	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4114	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4115
4116	nv_program_taskfile_regs(nvp, slot);
4117
4118	/*
4119	 * this next one sets the drive in motion
4120	 */
4121	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4122
4123	/*
4124	 * wait for the busy bit to settle
4125	 */
4126	NV_DELAY_NSEC(400);
4127
4128	/*
4129	 * wait for the drive to assert DRQ to send the first chunk
4130	 * of data. Have to busy wait because there's no interrupt for
4131	 * the first chunk. This is bad... uses a lot of cycles if the
4132	 * drive responds too slowly or if the wait loop granularity
4133	 * is too large. It's even worse if the drive is defective and
4134	 * the loop times out.
4135	 */
4136	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4137	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4138	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4139	    4000000, 0) == B_FALSE) {
4140		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4141
4142		goto error;
4143	}
4144
4145	/*
4146	 * send the first block.
4147	 */
4148	nv_intr_pio_out(nvp, nv_slotp);
4149
4150	/*
4151	 * If nvslot_flags is not set to COMPLETE yet, then processing
4152	 * is OK so far, so return.  Otherwise, fall into error handling
4153	 * below.
4154	 */
4155	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4156
4157		return (SATA_TRAN_ACCEPTED);
4158	}
4159
4160	error:
4161	/*
4162	 * there was an error so reset the device and complete the packet.
4163	 */
4164	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4165	nv_complete_io(nvp, spkt, 0);
4166	nv_reset(nvp);
4167
4168	return (SATA_TRAN_PORT_ERROR);
4169}
4170
4171
4172/*
 * start an ATAPI PACKET command (PIO data in or out)
4174 */
4175static int
4176nv_start_pkt_pio(nv_port_t *nvp, int slot)
4177{
4178	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4179	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4180	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4181	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4182
4183	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4184	    "nv_start_pkt_pio: start"));
4185
4186	/*
4187	 * Write the PACKET command to the command register.  Normally
4188	 * this would be done through nv_program_taskfile_regs().  It
4189	 * is done here because some values need to be overridden.
4190	 */
4191
4192	/* select the drive */
4193	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4194
	/* make certain the drive is selected */
4196	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4197	    NV_SEC2USEC(5), 0) == B_FALSE) {
4198		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4199		    "nv_start_pkt_pio: drive select failed"));
4200		return (SATA_TRAN_PORT_ERROR);
4201	}
4202
4203	/*
	 * The command is always sent via PIO, regardless of what the SATA
4205	 * framework sets in the command.  Overwrite the DMA bit to do this.
4206	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4207	 */
4208	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4209
	/* these registers are set appropriately by the sata framework */
4211	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4212	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4213	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4214	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4215
4216	/* initiate the command by writing the command register last */
4217	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4218
4219	/* Give the host controller time to do its thing */
4220	NV_DELAY_NSEC(400);
4221
4222	/*
4223	 * Wait for the device to indicate that it is ready for the command
4224	 * ATAPI protocol state - HP0: Check_Status_A
4225	 */
4226
4227	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4228	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4229	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4230	    4000000, 0) == B_FALSE) {
4231		/*
4232		 * Either an error or device fault occurred or the wait
4233		 * timed out.  According to the ATAPI protocol, command
4234		 * completion is also possible.  Other implementations of
4235		 * this protocol don't handle this last case, so neither
4236		 * does this code.
4237		 */
4238
4239		if (nv_get8(cmdhdl, nvp->nvp_status) &
4240		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4241			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4242
4243			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4244			    "nv_start_pkt_pio: device error (HP0)"));
4245		} else {
4246			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4247
4248			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4249			    "nv_start_pkt_pio: timeout (HP0)"));
4250		}
4251
4252		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4253		nv_complete_io(nvp, spkt, 0);
4254		nv_reset(nvp);
4255
4256		return (SATA_TRAN_PORT_ERROR);
4257	}
4258
4259	/*
4260	 * Put the ATAPI command in the data register
4261	 * ATAPI protocol state - HP1: Send_Packet
4262	 */
4263
4264	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4265	    (ushort_t *)nvp->nvp_data,
4266	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
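	/*
	 * The ACDB is transferred as 16-bit words; satacmd_acdb_len is
	 * expected to be an even number of bytes (ATAPI CDBs are 12 or
	 * 16 bytes).
	 */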
4267
4268	/*
4269	 * See you in nv_intr_pkt_pio.
4270	 * ATAPI protocol state - HP3: INTRQ_wait
4271	 */
4272
4273	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4274	    "nv_start_pkt_pio: exiting into HP3"));
4275
4276	return (SATA_TRAN_ACCEPTED);
4277}
4278
4279
4280/*
4281 * Interrupt processing for a non-data ATA command.
4282 */
4283static void
4284nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4285{
4286	uchar_t status;
4287	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4288	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4289	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4290	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4291
4292	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4293
4294	status = nv_get8(cmdhdl, nvp->nvp_status);
4295
4296	/*
4297	 * check for errors
4298	 */
4299	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4300		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4301		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4302		    nvp->nvp_altstatus);
4303		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4304	} else {
4305		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4306	}
4307
4308	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4309}
4310
4311
4312/*
4313 * ATA command, PIO data in
4314 */
4315static void
4316nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4317{
4318	uchar_t	status;
4319	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4320	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4321	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4322	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4323	int count;
4324
4325	status = nv_get8(cmdhdl, nvp->nvp_status);
4326
4327	if (status & SATA_STATUS_BSY) {
4328		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4329		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4330		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4331		    nvp->nvp_altstatus);
4332		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4333		nv_reset(nvp);
4334
4335		return;
4336	}
4337
4338	/*
4339	 * check for errors
4340	 */
4341	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4342	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4343		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4344		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4345		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4346
4347		return;
4348	}
4349
4350	/*
4351	 * read the next chunk of data (if any)
4352	 */
4353	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4354
4355	/*
4356	 * read count bytes
4357	 */
4358	ASSERT(count != 0);
4359
4360	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4361	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4362
4363	nv_slotp->nvslot_v_addr += count;
4364	nv_slotp->nvslot_byte_count -= count;
4365
4366
4367	if (nv_slotp->nvslot_byte_count != 0) {
4368		/*
4369		 * more to transfer.  Wait for next interrupt.
4370		 */
4371		return;
4372	}
4373
4374	/*
4375	 * transfer is complete. wait for the busy bit to settle.
4376	 */
4377	NV_DELAY_NSEC(400);
4378
4379	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4380	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4381}
4382
4383
4384/*
4385 * ATA command PIO data out
4386 */
4387static void
4388nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4389{
4390	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4391	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4392	uchar_t status;
4393	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4394	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4395	int count;
4396
4397	/*
4398	 * clear the IRQ
4399	 */
4400	status = nv_get8(cmdhdl, nvp->nvp_status);
4401
4402	if (status & SATA_STATUS_BSY) {
4403		/*
4404		 * this should not happen
4405		 */
4406		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4407		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4408		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4409		    nvp->nvp_altstatus);
4410		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4411
4412		return;
4413	}
4414
4415	/*
4416	 * check for errors
4417	 */
4418	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4419		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4420		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4421		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4422
4423		return;
4424	}
4425
4426	/*
	 * This is the condition which signals that the drive is no
	 * longer ready to transfer.  The transfer most likely completed
	 * successfully, but verify that byte_count is zero.
4431	 */
4432	if ((status & SATA_STATUS_DRQ) == 0) {
4433
4434		if (nv_slotp->nvslot_byte_count == 0) {
4435			/*
4436			 * complete; successful transfer
4437			 */
4438			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4439		} else {
4440			/*
4441			 * error condition, incomplete transfer
4442			 */
4443			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4444			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4445		}
4446		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4447
4448		return;
4449	}
4450
4451	/*
4452	 * write the next chunk of data
4453	 */
4454	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4455
4456	/*
	 * write count bytes
4458	 */
4459
4460	ASSERT(count != 0);
4461
4462	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4463	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4464
4465	nv_slotp->nvslot_v_addr += count;
4466	nv_slotp->nvslot_byte_count -= count;
4467}
4468
4469
4470/*
4471 * ATAPI PACKET command, PIO in/out interrupt
4472 *
4473 * Under normal circumstances, one of four different interrupt scenarios
4474 * will result in this function being called:
4475 *
4476 * 1. Packet command data transfer
4477 * 2. Packet command completion
4478 * 3. Request sense data transfer
4479 * 4. Request sense command completion
4480 */
4481static void
4482nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4483{
4484	uchar_t	status;
4485	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4486	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4487	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4488	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4489	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4490	uint16_t ctlr_count;
4491	int count;
4492
4493	/* ATAPI protocol state - HP2: Check_Status_B */
4494
4495	status = nv_get8(cmdhdl, nvp->nvp_status);
4496	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4497	    "nv_intr_pkt_pio: status 0x%x", status));
4498
4499	if (status & SATA_STATUS_BSY) {
4500		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4501			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4502			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4503		} else {
4504			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4505			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4506
4507			nv_reset(nvp);
4508		}
4509
4510		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4511		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4512
4513		return;
4514	}
4515
4516	if ((status & SATA_STATUS_DF) != 0) {
4517		/*
4518		 * On device fault, just clean up and bail.  Request sense
4519		 * will just default to its NO SENSE initialized value.
4520		 */
4521
4522		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4523			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4524		}
4525
4526		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4527		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4528
4529		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4530		    nvp->nvp_altstatus);
4531		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4532		    nvp->nvp_error);
4533
4534		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4535		    "nv_intr_pkt_pio: device fault"));
4536
4537		return;
4538	}
4539
4540	if ((status & SATA_STATUS_ERR) != 0) {
4541		/*
4542		 * On command error, figure out whether we are processing a
4543		 * request sense.  If so, clean up and bail.  Otherwise,
4544		 * do a REQUEST SENSE.
4545		 */
4546
4547		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4548			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4549			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4550			    NV_FAILURE) {
4551				nv_copy_registers(nvp, &spkt->satapkt_device,
4552				    spkt);
4553				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4554				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4555			}
4556
4557			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4558			    nvp->nvp_altstatus);
4559			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4560			    nvp->nvp_error);
4561		} else {
4562			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4563			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4564
4565			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4566		}
4567
4568		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4569		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4570
4571		return;
4572	}
4573
4574	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4575		/*
4576		 * REQUEST SENSE command processing
4577		 */
4578
4579		if ((status & (SATA_STATUS_DRQ)) != 0) {
4580			/* ATAPI state - HP4: Transfer_Data */
4581
4582			/* read the byte count from the controller */
4583			ctlr_count =
4584			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4585			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4586
4587			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4588			    "nv_intr_pkt_pio: ctlr byte count - %d",
4589			    ctlr_count));
4590
4591			if (ctlr_count == 0) {
4592				/* no data to transfer - some devices do this */
4593
4594				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4595				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4596
4597				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4598				    "nv_intr_pkt_pio: done (no data)"));
4599
4600				return;
4601			}
4602
4603			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4604
4605			/* transfer the data */
4606			ddi_rep_get16(cmdhdl,
4607			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4608			    (ushort_t *)nvp->nvp_data, (count >> 1),
4609			    DDI_DEV_NO_AUTOINCR);
4610
4611			/* consume residual bytes */
4612			ctlr_count -= count;
4613
4614			if (ctlr_count > 0) {
4615				for (; ctlr_count > 0; ctlr_count -= 2)
4616					(void) ddi_get16(cmdhdl,
4617					    (ushort_t *)nvp->nvp_data);
4618			}
4619
4620			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4621			    "nv_intr_pkt_pio: transition to HP2"));
4622		} else {
4623			/* still in ATAPI state - HP2 */
4624
4625			/*
4626			 * In order to avoid clobbering the rqsense data
4627			 * set by the SATA framework, the sense data read
4628			 * from the device is put in a separate buffer and
4629			 * copied into the packet after the request sense
4630			 * command successfully completes.
4631			 */
4632			bcopy(nv_slotp->nvslot_rqsense_buff,
4633			    spkt->satapkt_cmd.satacmd_rqsense,
4634			    SATA_ATAPI_RQSENSE_LEN);
4635
4636			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4637			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4638
4639			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4640			    "nv_intr_pkt_pio: request sense done"));
4641		}
4642
4643		return;
4644	}
4645
4646	/*
4647	 * Normal command processing
4648	 */
4649
4650	if ((status & (SATA_STATUS_DRQ)) != 0) {
4651		/* ATAPI protocol state - HP4: Transfer_Data */
4652
4653		/* read the byte count from the controller */
4654		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4655		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4656
4657		if (ctlr_count == 0) {
4658			/* no data to transfer - some devices do this */
4659
4660			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4661			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4662
4663			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4664			    "nv_intr_pkt_pio: done (no data)"));
4665
4666			return;
4667		}
4668
4669		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4670
4671		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4672		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4673
4674		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4675		    "nv_intr_pkt_pio: byte_count 0x%x",
4676		    nv_slotp->nvslot_byte_count));
4677
4678		/* transfer the data */
4679
4680		if (direction == SATA_DIR_READ) {
4681			ddi_rep_get16(cmdhdl,
4682			    (ushort_t *)nv_slotp->nvslot_v_addr,
4683			    (ushort_t *)nvp->nvp_data, (count >> 1),
4684			    DDI_DEV_NO_AUTOINCR);
4685
4686			ctlr_count -= count;
4687
4688			if (ctlr_count > 0) {
				/* consume remaining bytes */
4690
4691				for (; ctlr_count > 0;
4692				    ctlr_count -= 2)
4693					(void) ddi_get16(cmdhdl,
4694					    (ushort_t *)nvp->nvp_data);
4695
4696				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4697				    "nv_intr_pkt_pio: bytes remained"));
4698			}
4699		} else {
4700			ddi_rep_put16(cmdhdl,
4701			    (ushort_t *)nv_slotp->nvslot_v_addr,
4702			    (ushort_t *)nvp->nvp_data, (count >> 1),
4703			    DDI_DEV_NO_AUTOINCR);
4704		}
4705
4706		nv_slotp->nvslot_v_addr += count;
4707		nv_slotp->nvslot_byte_count -= count;
4708
4709		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4710		    "nv_intr_pkt_pio: transition to HP2"));
4711	} else {
4712		/* still in ATAPI state - HP2 */
4713
4714		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4715		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4716
4717		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4718		    "nv_intr_pkt_pio: done"));
4719	}
4720}
4721
4722
4723/*
4724 * ATA command, DMA data in/out
4725 */
4726static void
4727nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4728{
4729	uchar_t status;
4730	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4731	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4732	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4733	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4734	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4735	uchar_t	bmicx;
4736	uchar_t bm_status;
4737
4738	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4739
4740	/*
4741	 * stop DMA engine.
4742	 */
4743	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4744	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
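	/*
	 * Clearing BMICX_SSBM stops the bus master engine so that the
	 * drive and bus master status examined below reflect the
	 * completed transfer.
	 */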
4745
4746	/*
4747	 * get the status and clear the IRQ, and check for DMA error
4748	 */
4749	status = nv_get8(cmdhdl, nvp->nvp_status);
4750
4751	/*
4752	 * check for drive errors
4753	 */
4754	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4755		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4756		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4757		(void) nv_bm_status_clear(nvp);
4758
4759		return;
4760	}
4761
4762	bm_status = nv_bm_status_clear(nvp);
4763
4764	/*
4765	 * check for bus master errors
4766	 */
4767	if (bm_status & BMISX_IDERR) {
4768		spkt->satapkt_reason = SATA_PKT_RESET;
4769		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4770		    nvp->nvp_altstatus);
4771		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4772		nv_reset(nvp);
4773
4774		return;
4775	}
4776
4777	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4778}
4779
4780
4781/*
4782 * Wait for a register of a controller to achieve a specific state.
4783 * To return normally, all the bits in the first sub-mask must be ON,
4784 * all the bits in the second sub-mask must be OFF.
 * If the desired bit configuration is reached, return B_TRUE.  If
 * timeout_usec microseconds pass without reaching it, return B_FALSE.
4787 *
4788 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4789 * occur for the first 250 us, then switch over to a sleeping wait.
4790 *
4791 */
4792int
4793nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4794    int type_wait)
4795{
4796	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4797	hrtime_t end, cur, start_sleep, start;
4798	int first_time = B_TRUE;
4799	ushort_t val;
4800
4801	for (;;) {
4802		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4803
4804		if ((val & onbits) == onbits && (val & offbits) == 0) {
4805
4806			return (B_TRUE);
4807		}
4808
4809		cur = gethrtime();
4810
4811		/*
4812		 * store the start time and calculate the end
4813		 * time.  also calculate "start_sleep" which is
4814		 * the point after which the driver will stop busy
4815		 * waiting and change to sleep waiting.
4816		 */
4817		if (first_time) {
4818			first_time = B_FALSE;
4819			/*
4820			 * start and end are in nanoseconds
4821			 */
4822			start = cur;
4823			end = start + timeout_usec * 1000;
4824			/*
			 * add 250 us to start; busy wait until then
4826			 */
4827			start_sleep =  start + 250000;
4828
4829			if (servicing_interrupt()) {
4830				type_wait = NV_NOSLEEP;
4831			}
4832		}
4833
4834		if (cur > end) {
4835
4836			break;
4837		}
4838
4839		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4840#if ! defined(__lock_lint)
4841			delay(1);
4842#endif
4843		} else {
4844			drv_usecwait(nv_usec_delay);
4845		}
4846	}
4847
4848	return (B_FALSE);
4849}
4850
4851
4852/*
4853 * This is a slightly more complicated version that checks
 * for error conditions and bails out rather than looping
4855 * until the timeout is exceeded.
4856 *
4857 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4858 * occur for the first 250 us, then switch over to a sleeping wait.
4859 */
4860int
4861nv_wait3(
4862	nv_port_t	*nvp,
4863	uchar_t		onbits1,
4864	uchar_t		offbits1,
4865	uchar_t		failure_onbits2,
4866	uchar_t		failure_offbits2,
4867	uchar_t		failure_onbits3,
4868	uchar_t		failure_offbits3,
4869	uint_t		timeout_usec,
4870	int		type_wait)
4871{
4872	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4873	hrtime_t end, cur, start_sleep, start;
4874	int first_time = B_TRUE;
4875	ushort_t val;
4876
4877	for (;;) {
4878		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4879
4880		/*
4881		 * check for expected condition
4882		 */
4883		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4884
4885			return (B_TRUE);
4886		}
4887
4888		/*
4889		 * check for error conditions
4890		 */
4891		if ((val & failure_onbits2) == failure_onbits2 &&
4892		    (val & failure_offbits2) == 0) {
4893
4894			return (B_FALSE);
4895		}
4896
4897		if ((val & failure_onbits3) == failure_onbits3 &&
4898		    (val & failure_offbits3) == 0) {
4899
4900			return (B_FALSE);
4901		}
4902
4903		/*
4904		 * store the start time and calculate the end
4905		 * time.  also calculate "start_sleep" which is
4906		 * the point after which the driver will stop busy
4907		 * waiting and change to sleep waiting.
4908		 */
4909		if (first_time) {
4910			first_time = B_FALSE;
4911			/*
4912			 * start and end are in nanoseconds
4913			 */
4914			cur = start = gethrtime();
4915			end = start + timeout_usec * 1000;
4916			/*
			 * add 250 us to start; busy wait until then
4918			 */
4919			start_sleep =  start + 250000;
4920
4921			if (servicing_interrupt()) {
4922				type_wait = NV_NOSLEEP;
4923			}
4924		} else {
4925			cur = gethrtime();
4926		}
4927
4928		if (cur > end) {
4929
4930			break;
4931		}
4932
4933		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4934#if ! defined(__lock_lint)
4935			delay(1);
4936#endif
4937		} else {
4938			drv_usecwait(nv_usec_delay);
4939		}
4940	}
4941
4942	return (B_FALSE);
4943}
4944
4945
4946/*
 * nv_check_link() checks whether the link is active, i.e. a device is
 * present and communicating.
4949 */
4950static boolean_t
4951nv_check_link(uint32_t sstatus)
4952{
4953	uint8_t det;
4954
4955	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
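	/*
	 * SSTATUS_DET_DEVPRE_PHYCOM indicates that a device is present
	 * and PHY communication has been established.
	 */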
4956
4957	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4958}
4959
4960
4961/*
4962 * nv_port_state_change() reports the state of the port to the
4963 * sata module by calling sata_hba_event_notify().  This
 * function is called any time the state of the port is changed.
4965 */
4966static void
4967nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4968{
4969	sata_device_t sd;
4970
4971	bzero((void *)&sd, sizeof (sata_device_t));
4972	sd.satadev_rev = SATA_DEVICE_REV;
4973	nv_copy_registers(nvp, &sd, NULL);
4974
4975	/*
	 * When NCQ is implemented, the sactive and snotific fields will
	 * need to be updated.
4978	 */
4979	sd.satadev_addr.cport = nvp->nvp_port_num;
4980	sd.satadev_addr.qual = addr_type;
4981	sd.satadev_state = state;
4982
4983	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4984}
4985
4986
4987/*
4988 * timeout processing:
4989 *
4990 * Check if any packets have crossed a timeout threshold.  If so, then
4991 * abort the packet.  This function is not NCQ aware.
4992 *
4993 * If reset was invoked in any other place than nv_sata_probe(), then
4994 * monitor for reset completion here.
4995 *
4996 */
4997static void
4998nv_timeout(void *arg)
4999{
5000	nv_port_t *nvp = arg;
5001	nv_slot_t *nv_slotp;
5002	int restart_timeout = B_FALSE;
5003
5004	mutex_enter(&nvp->nvp_mutex);
5005
5006	/*
5007	 * If the probe entry point is driving the reset and signature
5008	 * acquisition, just return.
5009	 */
5010	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5011		goto finished;
5012	}
5013
5014	/*
5015	 * If the port is not in the init state, it likely
5016	 * means the link was lost while a timeout was active.
5017	 */
5018	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5019		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5020		    "nv_timeout: port uninitialized"));
5021
5022		goto finished;
5023	}
5024
5025	if (nvp->nvp_state & NV_PORT_RESET) {
5026		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5027		uint32_t sstatus;
5028
5029		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5030		    "nv_timeout(): port waiting for signature"));
5031
5032		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5033
5034		/*
5035		 * check for link presence.  If the link remains
5036		 * missing for more than 2 seconds, send a remove
5037		 * event and abort signature acquisition.
5038		 */
5039		if (nv_check_link(sstatus) == B_FALSE) {
5040			clock_t e_link_lost = ddi_get_lbolt();
5041
5042			if (nvp->nvp_link_lost_time == 0) {
5043				nvp->nvp_link_lost_time = e_link_lost;
5044			}
5045			if (TICK_TO_SEC(e_link_lost -
5046			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5047				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5048				    "probe: intermittent link lost while"
5049				    " resetting"));
5050				restart_timeout = B_TRUE;
5051			} else {
5052				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5053				    "link lost during signature acquisition."
5054				    "  Giving up"));
5055				nv_port_state_change(nvp,
5056				    SATA_EVNT_DEVICE_DETACHED|
5057				    SATA_EVNT_LINK_LOST,
5058				    SATA_ADDR_CPORT, 0);
5059				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5060				nvp->nvp_state &= ~NV_PORT_RESET;
5061			}
5062
5063			goto finished;
5064		} else {
5065
5066			nvp->nvp_link_lost_time = 0;
5067		}
5068
5069		nv_read_signature(nvp);
5070
5071		if (nvp->nvp_signature != 0) {
5072			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5073			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5074				nvp->nvp_state |= NV_PORT_RESTORE;
5075				nv_port_state_change(nvp,
5076				    SATA_EVNT_DEVICE_RESET,
5077				    SATA_ADDR_DCPORT,
5078				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5079			}
5080
5081			goto finished;
5082		}
5083
5084		/*
5085		 * Reset if more than 5 seconds has passed without
5086		 * acquiring a signature.
5087		 */
5088		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5089			nv_reset(nvp);
5090		}
5091
5092		restart_timeout = B_TRUE;
5093		goto finished;
5094	}
5095
5096
5097	/*
5098	 * not yet NCQ aware
5099	 */
5100	nv_slotp = &(nvp->nvp_slot[0]);
5101
5102	/*
5103	 * this happens early on before nv_slotp is set
5104	 * up OR when a device was unexpectedly removed and
5105	 * there was an active packet.
5106	 */
5107	if (nv_slotp == NULL) {
5108		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5109		    "nv_timeout: nv_slotp == NULL"));
5110
5111		goto finished;
5112	}
5113
5114	/*
5115	 * perform timeout checking and processing only if there is an
5116	 * active packet on the port
5117	 */
5118	if (nv_slotp->nvslot_spkt != NULL)  {
5119		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5120		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5121		uint8_t cmd = satacmd->satacmd_cmd_reg;
5122		uint64_t lba;
5123
5124#if ! defined(__lock_lint) && defined(DEBUG)
5125
5126		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5127		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5128		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5129		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5130		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5131		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5132#endif
5133
5134		/*
5135		 * timeout not needed if there is a polling thread
5136		 */
5137		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5138
5139			goto finished;
5140		}
5141
5142		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5143		    spkt->satapkt_time) {
5144			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5145			    "abort timeout: "
5146			    "nvslot_stime: %ld max ticks till timeout: "
5147			    "%ld cur_time: %ld cmd=%x lba=%d",
5148			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5149			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5150
5151			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5152
5153		} else {
5154			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5155			    " still in use so restarting timeout"));
5156		}
5157		restart_timeout = B_TRUE;
5158
5159	} else {
5160		/*
5161		 * there was no active packet, so do not re-enable timeout
5162		 */
5163		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5164		    "nv_timeout: no active packet so not re-arming timeout"));
5165	}
5166
5167	finished:
5168
5169	if (restart_timeout == B_TRUE) {
5170		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5171		    drv_usectohz(NV_ONE_SEC));
5172	} else {
5173		nvp->nvp_timeout_id = 0;
5174	}
5175	mutex_exit(&nvp->nvp_mutex);
5176}
5177
5178
5179/*
5180 * enable or disable the 3 interrupt types the driver is
5181 * interested in: completion, add and remove.
5182 */
5183static void
5184ck804_set_intr(nv_port_t *nvp, int flag)
5185{
5186	nv_ctl_t *nvc = nvp->nvp_ctlp;
5187	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5188	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5189	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5190	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5191	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5192	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
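	/*
	 * The interrupt enable and status bits for both channels live in
	 * shared 8-bit registers, so the arrays above are indexed by the
	 * port number to select the primary (PDEV) or secondary (SDEV)
	 * device bits.
	 */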
5193
5194	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5195		int_en = nv_get8(bar5_hdl,
5196		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5197		int_en &= ~intr_bits[port];
5198		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5199		    int_en);
5200		return;
5201	}
5202
5203	ASSERT(mutex_owned(&nvp->nvp_mutex));
5204
5205	/*
5206	 * controller level lock also required since access to an 8-bit
5207	 * interrupt register is shared between both channels.
5208	 */
5209	mutex_enter(&nvc->nvc_mutex);
5210
5211	if (flag & NV_INTR_CLEAR_ALL) {
5212		NVLOG((NVDBG_INTR, nvc, nvp,
5213		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));
5214
5215		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5216		    (uint8_t *)(nvc->nvc_ck804_int_status));
5217
5218		if (intr_status & clear_all_bits[port]) {
5219
5220			nv_put8(nvc->nvc_bar_hdl[5],
5221			    (uint8_t *)(nvc->nvc_ck804_int_status),
5222			    clear_all_bits[port]);
5223
5224			NVLOG((NVDBG_INTR, nvc, nvp,
5225			    "interrupt bits cleared %x",
5226			    intr_status & clear_all_bits[port]));
5227		}
5228	}
5229
5230	if (flag & NV_INTR_DISABLE) {
5231		NVLOG((NVDBG_INTR, nvc, nvp,
5232		    "ck804_set_intr: NV_INTR_DISABLE"));
5233		int_en = nv_get8(bar5_hdl,
5234		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5235		int_en &= ~intr_bits[port];
5236		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5237		    int_en);
5238	}
5239
5240	if (flag & NV_INTR_ENABLE) {
5241		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
5242		int_en = nv_get8(bar5_hdl,
5243		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5244		int_en |= intr_bits[port];
5245		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5246		    int_en);
5247	}
5248
5249	mutex_exit(&nvc->nvc_mutex);
5250}
5251
5252
5253/*
5254 * enable or disable the 3 interrupts the driver is interested in:
5255 * completion interrupt, hot add, and hot remove interrupt.
5256 */
5257static void
5258mcp5x_set_intr(nv_port_t *nvp, int flag)
5259{
5260	nv_ctl_t *nvc = nvp->nvp_ctlp;
5261	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5262	uint16_t intr_bits =
5263	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5264	uint16_t int_en;
5265
5266	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5267		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5268		int_en &= ~intr_bits;
5269		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5270		return;
5271	}
5272
5273	ASSERT(mutex_owned(&nvp->nvp_mutex));
5274
	NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x_set_intr: enter flag: %d", flag));
5276
5277	if (flag & NV_INTR_CLEAR_ALL) {
5278		NVLOG((NVDBG_INTR, nvc, nvp,
5279		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5280		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5281	}
5282
5283	if (flag & NV_INTR_ENABLE) {
5284		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5285		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5286		int_en |= intr_bits;
5287		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5288	}
5289
5290	if (flag & NV_INTR_DISABLE) {
5291		NVLOG((NVDBG_INTR, nvc, nvp,
5292		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5293		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5294		int_en &= ~intr_bits;
5295		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5296	}
5297}
5298
5299
5300/*
5301 * The PM functions for suspend and resume are incomplete and need additional
 * work.  They may or may not work in the current state.
5303 */
5304static void
5305nv_resume(nv_port_t *nvp)
5306{
5307	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5308
5309	mutex_enter(&nvp->nvp_mutex);
5310
5311	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5312		mutex_exit(&nvp->nvp_mutex);
5313
5314		return;
5315	}
5316
5317#ifdef SGPIO_SUPPORT
5318	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5319	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5320#endif
5321
5322	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5323
5324	/*
	 * power may have been removed from the port and the
5326	 * drive, and/or a drive may have been added or removed.
5327	 * Force a reset which will cause a probe and re-establish
5328	 * any state needed on the drive.
5329	 * nv_reset(nvp);
5330	 */
5331
5332	nv_reset(nvp);
5333
5334	mutex_exit(&nvp->nvp_mutex);
5335}
5336
5337/*
5338 * The PM functions for suspend and resume are incomplete and need additional
 * work.  They may or may not work in the current state.
5340 */
5341static void
5342nv_suspend(nv_port_t *nvp)
5343{
5344	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5345
5346	mutex_enter(&nvp->nvp_mutex);
5347
5348#ifdef SGPIO_SUPPORT
5349	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5350	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5351#endif
5352
5353	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5354		mutex_exit(&nvp->nvp_mutex);
5355
5356		return;
5357	}
5358
5359	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
5360
5361	/*
	 * power may have been removed from the port and the
5363	 * drive, and/or a drive may have been added or removed.
5364	 * Force a reset which will cause a probe and re-establish
5365	 * any state needed on the drive.
5366	 * nv_reset(nvp);
5367	 */
5368
5369	mutex_exit(&nvp->nvp_mutex);
5370}
5371
5372
5373static void
5374nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5375{
5376	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5377	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5378	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5379	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5380	uchar_t status;
5381	struct sata_cmd_flags flags;
5382
5383	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5384
5385	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5386	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5387	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5388
5389	if (spkt == NULL) {
5390
5391		return;
5392	}
5393
5394	/*
5395	 * in the error case, implicitly set the return of regs needed
5396	 * for error handling.
5397	 */
5398	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5399	    nvp->nvp_altstatus);
5400
5401	flags = scmd->satacmd_flags;
5402
5403	if (status & SATA_STATUS_ERR) {
5404		flags.sata_copy_out_lba_low_msb = B_TRUE;
5405		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5406		flags.sata_copy_out_lba_high_msb = B_TRUE;
5407		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5408		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5409		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5410		flags.sata_copy_out_error_reg = B_TRUE;
5411		flags.sata_copy_out_sec_count_msb = B_TRUE;
5412		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5413		scmd->satacmd_status_reg = status;
5414	}
5415
5416	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5417
5418		/*
5419		 * set HOB so that high byte will be read
5420		 */
5421		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5422
5423		/*
5424		 * get the requested high bytes
5425		 */
5426		if (flags.sata_copy_out_sec_count_msb) {
5427			scmd->satacmd_sec_count_msb =
5428			    nv_get8(cmdhdl, nvp->nvp_count);
5429		}
5430
5431		if (flags.sata_copy_out_lba_low_msb) {
5432			scmd->satacmd_lba_low_msb =
5433			    nv_get8(cmdhdl, nvp->nvp_sect);
5434		}
5435
5436		if (flags.sata_copy_out_lba_mid_msb) {
5437			scmd->satacmd_lba_mid_msb =
5438			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5439		}
5440
5441		if (flags.sata_copy_out_lba_high_msb) {
5442			scmd->satacmd_lba_high_msb =
5443			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5444		}
5445	}
5446
5447	/*
5448	 * disable HOB so that low byte is read
5449	 */
5450	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5451
5452	/*
5453	 * get the requested low bytes
5454	 */
5455	if (flags.sata_copy_out_sec_count_lsb) {
5456		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5457	}
5458
5459	if (flags.sata_copy_out_lba_low_lsb) {
5460		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5461	}
5462
5463	if (flags.sata_copy_out_lba_mid_lsb) {
5464		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5465	}
5466
5467	if (flags.sata_copy_out_lba_high_lsb) {
5468		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5469	}
5470
5471	/*
5472	 * get the device register if requested
5473	 */
5474	if (flags.sata_copy_out_device_reg) {
5475		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5476	}
5477
5478	/*
5479	 * get the error register if requested
5480	 */
5481	if (flags.sata_copy_out_error_reg) {
5482		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5483	}
5484}
5485
5486
5487/*
5488 * Hot plug and remove interrupts can occur when the device is reset.  Just
5489 * masking the interrupt doesn't always work well because if a
5490 * different interrupt arrives on the other port, the driver can still
5491 * end up checking the state of the other port and discover the hot
5492 * interrupt flag is set even though it was masked.  Checking for recent
 * reset activity and then ignoring the interrupt turns out to be the
 * easiest way.
5494 */
5495static void
5496nv_report_add_remove(nv_port_t *nvp, int flags)
5497{
5498	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5499	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5500	uint32_t sstatus;
5501	int i;
5502
5503	/*
	 * If a reset occurred within the last second, ignore the
	 * interrupt.  This somewhat heavy-handed clamping should be
	 * reworked and improved.
5507	 */
5508	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove():"
		    " ignoring plug interrupt; last reset was %dms ago",
5511		    TICK_TO_MSEC(time_diff)));
5512
5513		return;
5514	}
5515
5516	/*
5517	 * wait up to 1ms for sstatus to settle and reflect the true
5518	 * status of the port.  Failure to do so can create confusion
5519	 * in probe, where the incorrect sstatus value can still
5520	 * persist.
5521	 */
5522	for (i = 0; i < 1000; i++) {
5523		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5524
5525		if ((flags == NV_PORT_HOTREMOVED) &&
5526		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5527		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5528			break;
5529		}
5530
5531		if ((flags != NV_PORT_HOTREMOVED) &&
5532		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5533		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5534			break;
5535		}
5536		drv_usecwait(1);
5537	}
5538
5539	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5540	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5541
5542	if (flags == NV_PORT_HOTREMOVED) {
5543		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5544		    "nv_report_add_remove() hot removed"));
5545		nv_port_state_change(nvp,
5546		    SATA_EVNT_DEVICE_DETACHED,
5547		    SATA_ADDR_CPORT, 0);
5548
5549		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5550	} else {
5551		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5552		    "nv_report_add_remove() hot plugged"));
5553		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5554		    SATA_ADDR_CPORT, 0);
5555	}
5556}
5557
5558/*
 * Get request sense data and stuff it in the command's sense buffer.
5560 * Start a request sense command in order to get sense data to insert
5561 * in the sata packet's rqsense buffer.  The command completion
5562 * processing is in nv_intr_pkt_pio.
5563 *
5564 * The sata framework provides a function to allocate and set-up a
 * request sense packet command.  The reasons it is not used here are:
5566 * a) it cannot be called in an interrupt context and this function is
5567 *    called in an interrupt context.
5568 * b) it allocates DMA resources that are not used here because this is
5569 *    implemented using PIO.
5570 *
5571 * If, in the future, this is changed to use DMA, the sata framework should
5572 * be used to allocate and set-up the error retrieval (request sense)
5573 * command.
5574 */
5575static int
5576nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5577{
5578	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5579	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5580	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5581	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5582
5583	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5584	    "nv_start_rqsense_pio: start"));
5585
5586	/* clear the local request sense buffer before starting the command */
5587	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5588
5589	/* Write the request sense PACKET command */
5590
5591	/* select the drive */
5592	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5593
	/* make certain the drive is selected */
5595	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5596	    NV_SEC2USEC(5), 0) == B_FALSE) {
5597		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5598		    "nv_start_rqsense_pio: drive select failed"));
5599		return (NV_FAILURE);
5600	}
5601
5602	/* set up the command */
5603	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
5604	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5605	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5606	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5607	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5608
5609	/* initiate the command by writing the command register last */
5610	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5611
5612	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5613	NV_DELAY_NSEC(400);
5614
5615	/*
5616	 * Wait for the device to indicate that it is ready for the command
5617	 * ATAPI protocol state - HP0: Check_Status_A
5618	 */
5619
5620	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5621	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5622	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5623	    4000000, 0) == B_FALSE) {
5624		if (nv_get8(cmdhdl, nvp->nvp_status) &
5625		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5626			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5627			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5628		} else {
5629			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5630			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5631		}
5632
5633		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5634		nv_complete_io(nvp, spkt, 0);
5635		nv_reset(nvp);
5636
5637		return (NV_FAILURE);
5638	}
5639
5640	/*
5641	 * Put the ATAPI command in the data register
5642	 * ATAPI protocol state - HP1: Send_Packet
5643	 */
5644
5645	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5646	    (ushort_t *)nvp->nvp_data,
5647	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5648
5649	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5650	    "nv_start_rqsense_pio: exiting into HP3"));
5651
5652	return (NV_SUCCESS);
5653}
5654
5655/*
5656 * quiesce(9E) entry point.
5657 *
5658 * This function is called when the system is single-threaded at high
5659 * PIL with preemption disabled. Therefore, this function must not be
5660 * blocked.
5661 *
5662 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5663 * DDI_FAILURE indicates an error condition and should almost never happen.
5664 */
5665static int
5666nv_quiesce(dev_info_t *dip)
5667{
5668	int port, instance = ddi_get_instance(dip);
5669	nv_ctl_t *nvc;
5670
5671	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
5672		return (DDI_FAILURE);
5673
5674	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
5675		nv_port_t *nvp = &(nvc->nvc_port[port]);
5676		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5677		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5678		uint32_t sctrl;
5679
5680		/*
5681		 * Stop the controllers from generating interrupts.
5682		 */
5683		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
5684
5685		/*
5686		 * clear signature registers
5687		 */
5688		nv_put8(cmdhdl, nvp->nvp_sect, 0);
5689		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
5690		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
5691		nv_put8(cmdhdl, nvp->nvp_count, 0);
5692
5693		nvp->nvp_signature = 0;
5694		nvp->nvp_type = 0;
5695		nvp->nvp_state |= NV_PORT_RESET;
5696		nvp->nvp_reset_time = ddi_get_lbolt();
5697		nvp->nvp_link_lost_time = 0;
5698
5699		/*
5700		 * assert reset in PHY by writing a 1 to bit 0 scontrol
5701		 */
5702		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5703
5704		nv_put32(bar5_hdl, nvp->nvp_sctrl,
5705		    sctrl | SCONTROL_DET_COMRESET);
5706
5707		/*
5708		 * wait 1ms
5709		 */
5710		drv_usecwait(1000);
5711
5712		/*
5713		 * de-assert reset in PHY
5714		 */
5715		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
5716	}
5717
5718	return (DDI_SUCCESS);
5719}
5720
5721
5722#ifdef SGPIO_SUPPORT
5723/*
5724 * NVIDIA specific SGPIO LED support
5725 * Please refer to the NVIDIA documentation for additional details
5726 */
5727
5728/*
5729 * nv_sgp_led_init
5730 * Detect SGPIO support.  If present, initialize.
5731 */
5732static void
5733nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5734{
5735	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5736	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5737	nv_sgp_cmn_t *cmn;	/* shared data structure */
5738	char tqname[SGPIO_TQ_NAME_LEN];
5739	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5740
5741	/*
5742	 * Initialize with appropriately invalid values in case this function
5743	 * exits without initializing SGPIO (for example, there is no SGPIO
5744	 * support).
5745	 */
5746	nvc->nvc_sgp_csr = 0;
5747	nvc->nvc_sgp_cbp = NULL;
5748
5749	/*
5750	 * Only try to initialize SGPIO LED support if this property
5751	 * indicates it should be.
5752	 */
5753	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
5754	    "enable-sgpio-leds", 0) != 1)
5755		return;
5756
5757	/*
5758	 * CK804 can pass the sgpio_detect test even though it does not support
5759	 * SGPIO, so don't even look at a CK804.
5760	 */
5761	if (nvc->nvc_mcp5x_flag != B_TRUE)
5762		return;
5763
5764	/*
5765	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5766	 * However, the current implementation only supports 4 drives.
 * With two drives per controller, that means only the first two
 * controllers need to be examined.
5769	 */
5770	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5771		return;
5772
5773	/* confirm that the SGPIO registers are there */
5774	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5775		NVLOG((NVDBG_INIT, nvc, NULL,
5776		    "SGPIO registers not detected"));
5777		return;
5778	}
5779
5780	/* save off the SGPIO_CSR I/O address */
5781	nvc->nvc_sgp_csr = csrp;
5782
5783	/* map in Command Block */
5784	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5785	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5786
5787	/* initialize the SGPIO h/w */
5788	if (nv_sgp_init(nvc) == NV_FAILURE) {
5789		nv_cmn_err(CE_WARN, nvc, NULL,
5790		    "!Unable to initialize SGPIO");
5791	}
5792
5793	if (nvc->nvc_ctlr_num == 0) {
5794		/*
		 * Controller 0 on the MCP5X/IO55 initializes the SGPIO
		 * and the data that is shared between the controllers.
		 * The clever thing to do would be to let the first controller
		 * that comes up be the one that initializes all this.
		 * However, SGPIO state is not necessarily zeroed between
		 * OS reboots, so there might be old data there.
5801		 */
5802
5803		/* allocate shared space */
5804		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5805		    KM_SLEEP);
5806		if (cmn == NULL) {
5807			nv_cmn_err(CE_WARN, nvc, NULL,
5808			    "!Failed to allocate shared data");
5809			return;
5810		}
5811
5812		nvc->nvc_sgp_cmn = cmn;
5813
5814		/* initialize the shared data structure */
5815		cmn->nvs_magic = SGPIO_MAGIC;
5816		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5817		cmn->nvs_connected = 0;
5818		cmn->nvs_activity = 0;
5819
5820		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5821		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5822		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5823
5824		/* put the address in the SGPIO scratch register */
5825#if defined(__amd64)
5826		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5827#else
5828		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5829#endif
5830
5831		/* start the activity LED taskq */
5832
5833		/*
		 * The taskq name should be unique, so append the low-order
		 * bits of the current time.
5835		 */
5836		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5837		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5838		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5839		    TASKQ_DEFAULTPRI, 0);
5840		if (cmn->nvs_taskq == NULL) {
5841			cmn->nvs_taskq_delay = 0;
5842			nv_cmn_err(CE_WARN, nvc, NULL,
5843			    "!Failed to start activity LED taskq");
5844		} else {
5845			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5846			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5847			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5848		}
5849
5850	} else if (nvc->nvc_ctlr_num == 1) {
5851		/*
		 * Controller 1 confirms that SGPIO has been initialized and,
		 * if so, tries to get the shared data pointer now; otherwise
		 * the shared data pointer is picked up when the data is
		 * accessed.
5855		 */
5856
5857		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5858			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5859
5860			/*
5861			 * It looks like a pointer, but is it the shared data?
5862			 */
5863			if (cmn->nvs_magic == SGPIO_MAGIC) {
5864				nvc->nvc_sgp_cmn = cmn;
5865
5866				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5867			}
5868		}
5869	}
5870}
5871
5872/*
5873 * nv_sgp_detect
5874 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5875 * report back whether both were readable.
5876 */
5877static int
5878nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5879    uint32_t *cbpp)
5880{
5881	/* get the SGPIO_CSRP */
5882	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5883	if (*csrpp == 0) {
5884		return (NV_FAILURE);
5885	}
5886
5887	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5888	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5889	if (*cbpp == 0) {
5890		return (NV_FAILURE);
5891	}
5892
5893	/* SGPIO_CBP is good, so we must support SGPIO */
5894	return (NV_SUCCESS);
5895}
5896
5897/*
5898 * nv_sgp_init
5899 * Initialize SGPIO.  The process is specified by NVIDIA.
5900 */
5901static int
5902nv_sgp_init(nv_ctl_t *nvc)
5903{
5904	uint32_t status;
5905	int drive_count;
5906
5907	/*
5908	 * If the SGPIO state is SGPIO_STATE_RESET, the logic has been
5909	 * reset and needs to be initialized.
5910	 */
5911	status = nv_sgp_csr_read(nvc);
5912	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5913		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5914			/* reset and try again */
5915			nv_sgp_reset(nvc);
5916			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5917				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5918				    "SGPIO init failed"));
5919				return (NV_FAILURE);
5920			}
5921		}
5922	}
5923
5924	/*
5925	 * NVIDIA recommends reading the supported drive count even
5926	 * though they also indicate that it is 4 at this time.
5927	 */
5928	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5929	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5930		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5931		    "SGPIO reported undocumented drive count - %d",
5932		    drive_count));
5933	}
5934
5935	NVLOG((NVDBG_INIT, nvc, NULL,
5936	    "initialized ctlr: %d csr: 0x%08x",
5937	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5938
5939	return (NV_SUCCESS);
5940}
5941
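/*
 * nv_sgp_reset
 * Issue the SGPIO RESET command and log a message if the command status
 * does not indicate success.
 */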
5942static void
5943nv_sgp_reset(nv_ctl_t *nvc)
5944{
5945	uint32_t cmd;
5946	uint32_t status;
5947
5948	cmd = SGPIO_CMD_RESET;
5949	nv_sgp_csr_write(nvc, cmd);
5950
5951	status = nv_sgp_csr_read(nvc);
5952
5953	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5954		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5955		    "SGPIO reset failed: CSR - 0x%x", status));
5956	}
5957}
5958
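/*
 * nv_sgp_init_cmd
 * Issue the SGPIO READ_PARAMS command and poll for its completion, which
 * is indicated by a change in the CSR sequence field (or by an error
 * status).
 */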
5959static int
5960nv_sgp_init_cmd(nv_ctl_t *nvc)
5961{
5962	int seq;
5963	hrtime_t start, end;
5964	uint32_t status;
5965	uint32_t cmd;
5966
5967	/* get the old sequence value */
5968	status = nv_sgp_csr_read(nvc);
5969	seq = SGPIO_CSR_SEQ(status);
5970
5971	/* check the state since we have the info anyway */
5972	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5973		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5974		    "SGPIO init_cmd: state not operational"));
5975	}
5976
5977	/* issue command */
5978	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5979	nv_sgp_csr_write(nvc, cmd);
5980
5981	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5982
5983	/* poll for completion */
5984	start = gethrtime();
5985	end = start + NV_SGP_CMD_TIMEOUT;
5986	for (;;) {
5987		status = nv_sgp_csr_read(nvc);
5988
5989		/* break on error */
5990		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5991			break;
5992
5993		/* break on command completion (seq changed) */
5994		if (SGPIO_CSR_SEQ(status) != seq) {
5995			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
5996				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5997				    "Seq changed but command still active"));
5998			}
5999
6000			break;
6001		}
6002
6003		/* Wait 400 ns and try again */
6004		NV_DELAY_NSEC(400);
6005
6006		if (gethrtime() > end)
6007			break;
6008	}
6009
6010	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6011		return (NV_SUCCESS);
6012
6013	return (NV_FAILURE);
6014}
6015
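/*
 * nv_sgp_check_set_cmn
 * Pick up the shared SGPIO data pointer from the scratch register (if one
 * has been stored there), validate it with the magic number, and mark this
 * controller as a user of the shared data.
 */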
6016static int
6017nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6018{
6019	nv_sgp_cmn_t *cmn;
6020
6021	if (nvc->nvc_sgp_cbp == NULL)
6022		return (NV_FAILURE);
6023
6024	/* check to see if Scratch Register is set */
6025	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
6026		nvc->nvc_sgp_cmn =
6027		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
6028
6029		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
6030			return (NV_FAILURE);
6031
6032		cmn = nvc->nvc_sgp_cmn;
6033
6034		mutex_enter(&cmn->nvs_slock);
6035		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6036		mutex_exit(&cmn->nvs_slock);
6037
6038		return (NV_SUCCESS);
6039	}
6040
6041	return (NV_FAILURE);
6042}
6043
6044/*
6045 * nv_sgp_csr_read
6046 * This is just a 32-bit I/O port read.  The port number was obtained from
6047 * the PCI config space.
6048 *
6049 * XXX It was advised to use the in[bwl] functions for this, even though they
6050 * are obsolete interfaces.
6051 */
6052static int
6053nv_sgp_csr_read(nv_ctl_t *nvc)
6054{
6055	return (inl(nvc->nvc_sgp_csr));
6056}
6057
6058/*
6059 * nv_sgp_csr_write
6060 * This is just a 32-bit I/O port write.  The port number was obtained from
6061 * the PCI config space.
6062 *
6063 * XXX It was advised to use the out[bwl] functions for this, even though they
6064 * are obsolete interfaces.
6065 */
6066static void
6067nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6068{
6069	outl(nvc->nvc_sgp_csr, val);
6070}
6071
6072/*
6073 * nv_sgp_write_data
6074 * Cause SGPIO to send Command Block data
6075 */
6076static int
6077nv_sgp_write_data(nv_ctl_t *nvc)
6078{
6079	hrtime_t start, end;
6080	uint32_t status;
6081	uint32_t cmd;
6082
6083	/* issue command */
6084	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6085	nv_sgp_csr_write(nvc, cmd);
6086
6087	/* poll for completion */
6088	start = gethrtime();
6089	end = start + NV_SGP_CMD_TIMEOUT;
6090	for (;;) {
6091		status = nv_sgp_csr_read(nvc);
6092
6093		/* break on error completion */
6094		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6095			break;
6096
6097		/* break on successful completion */
6098		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6099			break;
6100
6101		/* Wait 400 ns and try again */
6102		NV_DELAY_NSEC(400);
6103
6104		if (gethrtime() > end)
6105			break;
6106	}
6107
6108	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6109		return (NV_SUCCESS);
6110
6111	return (NV_FAILURE);
6112}
6113
6114/*
6115 * nv_sgp_activity_led_ctl
6116 * This is run as a taskq.  It wakes up at a fixed interval and checks to
6117 * see if any of the activity LEDs need to be changed.
6118 */
6119static void
6120nv_sgp_activity_led_ctl(void *arg)
6121{
6122	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6123	nv_sgp_cmn_t *cmn;
6124	volatile nv_sgp_cb_t *cbp;
6125	clock_t ticks;
6126	uint8_t drv_leds;
6127	uint32_t old_leds;
6128	uint32_t new_led_state;
6129	int i;
6130
6131	cmn = nvc->nvc_sgp_cmn;
6132	cbp = nvc->nvc_sgp_cbp;
6133
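	/*
	 * Loop, toggling the activity LED of each connected drive that has
	 * seen activity since the last pass.  The loop exits when the delay
	 * is set to zero, which nv_sgp_cleanup() does to shut this taskq down.
	 */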
6134	do {
6135		/* save off the old state of all of the LEDs */
6136		old_leds = cbp->sgpio0_tr;
6137
6138		DTRACE_PROBE3(sgpio__activity__state,
6139		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6140		    int, old_leds);
6141
6142		new_led_state = 0;
6143
6144		/* for each drive */
6145		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6146
6147			/* get the current state of the LEDs for the drive */
6148			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6149
6150			if ((cmn->nvs_connected & (1 << i)) == 0) {
6151				/* if not connected, turn off activity */
6152				drv_leds &= ~TR_ACTIVE_MASK;
6153				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6154
6155				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6156				new_led_state |=
6157				    SGPIO0_TR_DRV_SET(drv_leds, i);
6158
6159				continue;
6160			}
6161
6162			if ((cmn->nvs_activity & (1 << i)) == 0) {
6163				/* connected, but not active */
6164				drv_leds &= ~TR_ACTIVE_MASK;
6165				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6166
6167				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6168				new_led_state |=
6169				    SGPIO0_TR_DRV_SET(drv_leds, i);
6170
6171				continue;
6172			}
6173
6174			/* connected and active */
6175			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6176				/* was enabled, so disable */
6177				drv_leds &= ~TR_ACTIVE_MASK;
6178				drv_leds |=
6179				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6180
6181				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6182				new_led_state |=
6183				    SGPIO0_TR_DRV_SET(drv_leds, i);
6184			} else {
6185				/* was disabled, so enable */
6186				drv_leds &= ~TR_ACTIVE_MASK;
6187				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6188
6189				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6190				new_led_state |=
6191				    SGPIO0_TR_DRV_SET(drv_leds, i);
6192			}
6193
6194			/*
6195			 * Clear the activity bit.  If there is drive
6196			 * activity again within the loop interval (now
6197			 * 1/16 second), nvs_activity will be set again and
6198			 * the "connected and active" condition above will
6199			 * cause the LED to blink off and on at the loop
6200			 * interval rate.  The rate may be increased
6201			 * (interval shortened) as long as the interval does
6202			 * not drop below 1/30 second.
6203			 */
6204			mutex_enter(&cmn->nvs_slock);
6205			cmn->nvs_activity &= ~(1 << i);
6206			mutex_exit(&cmn->nvs_slock);
6207		}
6208
6209		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6210
6211		/* write out LED values */
6212
6213		mutex_enter(&cmn->nvs_slock);
6214		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6215		cbp->sgpio0_tr |= new_led_state;
6216		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6217		mutex_exit(&cmn->nvs_slock);
6218
6219		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6220			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6221			    "nv_sgp_write_data failure updating active LED"));
6222		}
6223
6224		/* now rest for the interval */
6225		mutex_enter(&cmn->nvs_tlock);
6226		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6227		if (ticks > 0)
6228			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6229			    ddi_get_lbolt() + ticks);
6230		mutex_exit(&cmn->nvs_tlock);
6231	} while (ticks > 0);
6232}
6233
6234/*
6235 * nv_sgp_drive_connect
6236 * Sets the flag used to indicate that the drive is attached to the HBA.
6237 * Used to let the taskq know that it should turn the Activity LED on.
6238 */
6239static void
6240nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6241{
6242	nv_sgp_cmn_t *cmn;
6243
6244	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6245		return;
6246	cmn = nvc->nvc_sgp_cmn;
6247
6248	mutex_enter(&cmn->nvs_slock);
6249	cmn->nvs_connected |= (1 << drive);
6250	mutex_exit(&cmn->nvs_slock);
6251}
6252
6253/*
6254 * nv_sgp_drive_disconnect
6255 * Clears the flag used to indicate that the drive is attached to the HBA
6256 * (the drive is no longer attached).  Used to let the taskq know that it
6257 * should turn the Activity LED off.  The flag that indicates that the
6258 * drive is active is also cleared.
6259 */
6260static void
6261nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6262{
6263	nv_sgp_cmn_t *cmn;
6264
6265	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6266		return;
6267	cmn = nvc->nvc_sgp_cmn;
6268
6269	mutex_enter(&cmn->nvs_slock);
6270	cmn->nvs_connected &= ~(1 << drive);
6271	cmn->nvs_activity &= ~(1 << drive);
6272	mutex_exit(&cmn->nvs_slock);
6273}
6274
6275/*
6276 * nv_sgp_drive_active
6277 * Sets the flag used to indicate that the drive has been accessed and the
6278 * LED should be flicked off, then on.  It is cleared at a fixed time
6279 * interval by the LED taskq and set by the sata command start.
6280 */
6281static void
6282nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6283{
6284	nv_sgp_cmn_t *cmn;
6285
6286	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6287		return;
6288	cmn = nvc->nvc_sgp_cmn;
6289
6290	DTRACE_PROBE1(sgpio__active, int, drive);
6291
6292	mutex_enter(&cmn->nvs_slock);
6293	cmn->nvs_connected |= (1 << drive);
6294	cmn->nvs_activity |= (1 << drive);
6295	mutex_exit(&cmn->nvs_slock);
6296}
6297
6298
6299/*
6300 * nv_sgp_locate
6301 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6302 * maintained in the SGPIO Command Block.
6303 */
6304static void
6305nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6306{
6307	uint8_t leds;
6308	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6309	nv_sgp_cmn_t *cmn;
6310
6311	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6312		return;
6313	cmn = nvc->nvc_sgp_cmn;
6314
6315	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6316		return;
6317
6318	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6319
6320	mutex_enter(&cmn->nvs_slock);
6321
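	/* update only the Locate/OK2RM bits for this drive */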
6322	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6323
6324	leds &= ~TR_LOCATE_MASK;
6325	leds |= TR_LOCATE_SET(value);
6326
6327	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6328	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6329
6330	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6331
6332	mutex_exit(&cmn->nvs_slock);
6333
6334	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6335		nv_cmn_err(CE_WARN, nvc, NULL,
6336		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6337	}
6338}
6339
6340/*
6341 * nv_sgp_error
6342 * Turns the Error/Failure LED off or on for a particular drive.  State is
6343 * maintained in the SGPIO Command Block.
6344 */
6345static void
6346nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6347{
6348	uint8_t leds;
6349	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6350	nv_sgp_cmn_t *cmn;
6351
6352	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6353		return;
6354	cmn = nvc->nvc_sgp_cmn;
6355
6356	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6357		return;
6358
6359	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6360
6361	mutex_enter(&cmn->nvs_slock);
6362
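	/* update only the Error/Failure bits for this drive */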
6363	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6364
6365	leds &= ~TR_ERROR_MASK;
6366	leds |= TR_ERROR_SET(value);
6367
6368	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6369	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6370
6371	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6372
6373	mutex_exit(&cmn->nvs_slock);
6374
6375	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6376		nv_cmn_err(CE_WARN, nvc, NULL,
6377		    "!nv_sgp_write_data failure updating Fail/Error LED");
6378	}
6379}
6380
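/*
 * nv_sgp_cleanup
 * Turn off the activity LEDs for this controller's ports and clear this
 * controller's "in use" bit.  When the last controller releases the shared
 * data, shut down the activity LED taskq, turn off all of the LEDs, and
 * free the shared resources.
 */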
6381static void
6382nv_sgp_cleanup(nv_ctl_t *nvc)
6383{
6384	int drive;
6385	uint8_t drv_leds;
6386	uint32_t led_state;
6387	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6388	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6389	extern void psm_unmap_phys(caddr_t, size_t);
6390
6391	/*
6392	 * If the SGPIO command block isn't mapped or the shared data
6393	 * structure isn't present in this instance, there isn't much that
6394	 * can be cleaned up.
6395	 */
6396	if ((cb == NULL) || (cmn == NULL))
6397		return;
6398
6399	/* turn off activity LEDs for this controller */
6400	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6401
6402	/* get the existing LED state */
6403	led_state = cb->sgpio0_tr;
6404
6405	/* turn off port 0 */
6406	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6407	led_state &= SGPIO0_TR_DRV_CLR(drive);
6408	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6409
6410	/* turn off port 1 */
6411	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6412	led_state &= SGPIO0_TR_DRV_CLR(drive);
6413	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6414
6415	/* set the new led state, which should turn off this ctrl's LEDs */
	cb->sgpio0_tr = led_state;
6416	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6417	(void) nv_sgp_write_data(nvc);
6418
6419	/* clear the controller's in use bit */
6420	mutex_enter(&cmn->nvs_slock);
6421	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6422	mutex_exit(&cmn->nvs_slock);
6423
6424	if (cmn->nvs_in_use == 0) {
6425		/* if all "in use" bits cleared, take everything down */
6426
6427		if (cmn->nvs_taskq != NULL) {
6428			/* allow activity taskq to exit */
6429			cmn->nvs_taskq_delay = 0;
6430			cv_broadcast(&cmn->nvs_cv);
6431
6432			/* then destroy it */
6433			ddi_taskq_destroy(cmn->nvs_taskq);
6434		}
6435
6436		/* turn off all of the LEDs */
6437		cb->sgpio0_tr = 0;
6438		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6439		(void) nv_sgp_write_data(nvc);
6440
6441		cb->sgpio_sr = 0;
6442
6443		/* free resources */
6444		cv_destroy(&cmn->nvs_cv);
6445		mutex_destroy(&cmn->nvs_tlock);
6446		mutex_destroy(&cmn->nvs_slock);
6447
6448		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6449	}
6450
6451	nvc->nvc_sgp_cmn = NULL;
6452
6453	/* unmap the SGPIO Command Block */
6454	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6455}
6456#endif	/* SGPIO_SUPPORT */
6457