nv_sata.c revision 7564:34c3bf16874f
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
30 *
31 * NCQ
32 * ---
33 *
34 * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
35 * and is likely to be revisited in the future.
36 *
37 *
38 * Power Management
39 * ----------------
40 *
41 * Normally power management would be responsible for ensuring the device
42 * is quiescent and then changing power states to the device, such as
43 * powering down parts or all of the device.  mcp55/ck804 is unique in
44 * that it is only available as part of a larger southbridge chipset, so
45 * removing power to the device isn't possible.  Switches to control
46 * power management states D0/D3 in the PCI configuration space appear to
47 * be supported but changes to these states are apparently are ignored.
48 * The only further PM that the driver _could_ do is shut down the PHY,
49 * but in order to deliver the first rev of the driver sooner than later,
50 * that will be deferred until some future phase.
51 *
52 * Since the driver currently will not directly change any power state to
53 * the device, no power() entry point will be required.  However, it is
54 * possible that in ACPI power state S3, aka suspend to RAM, that power
55 * can be removed to the device, and the driver cannot rely on BIOS to
56 * have reset any state.  For the time being, there is no known
57 * non-default configurations that need to be programmed.  This judgement
58 * is based on the port of the legacy ata driver not having any such
59 * functionality and based on conversations with the PM team.  If such a
60 * restoration is later deemed necessary it can be incorporated into the
61 * DDI_RESUME processing.
62 *
63 */
64
65#include <sys/scsi/scsi.h>
66#include <sys/pci.h>
67#include <sys/byteorder.h>
68#include <sys/sunddi.h>
69#include <sys/sata/sata_hba.h>
70#ifdef SGPIO_SUPPORT
71#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72#include <sys/devctl.h>
73#include <sys/sdt.h>
74#endif
75#include <sys/sata/adapters/nv_sata/nv_sata.h>
76#include <sys/disp.h>
77#include <sys/note.h>
78#include <sys/promif.h>
79
80
81/*
82 * Function prototypes for driver entry points
83 */
84static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
87    void *arg, void **result);
88
89/*
90 * Function prototypes for entry points from sata service module
91 * These functions are distinguished from other local functions
92 * by the prefix "nv_sata_"
93 */
94static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
95static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
96static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
97static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
98static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
99
100/*
101 * Local function prototypes
102 */
103static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
104static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
105static int nv_add_legacy_intrs(nv_ctl_t *nvc);
106#ifdef NV_MSI_SUPPORTED
107static int nv_add_msi_intrs(nv_ctl_t *nvc);
108#endif
109static void nv_rem_intrs(nv_ctl_t *nvc);
110static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
111static int nv_start_nodata(nv_port_t *nvp, int slot);
112static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
113static int nv_start_pio_in(nv_port_t *nvp, int slot);
114static int nv_start_pio_out(nv_port_t *nvp, int slot);
115static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
116static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
117static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
118static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
119static int nv_start_dma(nv_port_t *nvp, int slot);
120static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
121static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
122static void nv_uninit_ctl(nv_ctl_t *nvc);
123static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125static void nv_uninit_port(nv_port_t *nvp);
126static int nv_init_port(nv_port_t *nvp);
127static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129#ifdef NCQ
130static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131#endif
132static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134    int state);
135static boolean_t nv_check_link(uint32_t sstatus);
136static void nv_common_reg_init(nv_ctl_t *nvc);
137static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
138static void nv_reset(nv_port_t *nvp);
139static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
140static void nv_timeout(void *);
141static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
142static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
143static void nv_read_signature(nv_port_t *nvp);
144static void mcp55_set_intr(nv_port_t *nvp, int flag);
145static void mcp04_set_intr(nv_port_t *nvp, int flag);
146static void nv_resume(nv_port_t *nvp);
147static void nv_suspend(nv_port_t *nvp);
148static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
149static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
150static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151    sata_pkt_t *spkt);
152static void nv_report_add_remove(nv_port_t *nvp, int flags);
153static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155    uchar_t failure_onbits2, uchar_t failure_offbits2,
156    uchar_t failure_onbits3, uchar_t failure_offbits3,
157    uint_t timeout_usec, int type_wait);
158static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159    uint_t timeout_usec, int type_wait);
160static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161
162#ifdef SGPIO_SUPPORT
163static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
164static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
165static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
166    cred_t *credp, int *rvalp);
167
168static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
169static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
170    uint32_t *cbpp);
171static int nv_sgp_init(nv_ctl_t *nvc);
172static void nv_sgp_reset(nv_ctl_t *nvc);
173static int nv_sgp_init_cmd(nv_ctl_t *nvc);
174static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
175static int nv_sgp_csr_read(nv_ctl_t *nvc);
176static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
177static int nv_sgp_write_data(nv_ctl_t *nvc);
178static void nv_sgp_activity_led_ctl(void *arg);
179static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
180static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
181static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
182static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
183static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
184static void nv_sgp_cleanup(nv_ctl_t *nvc);
185#endif
186
187
188/*
189 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
190 * Verify if needed if ported to other ISA.
191 */
192static ddi_dma_attr_t buffer_dma_attr = {
193	DMA_ATTR_V0,		/* dma_attr_version */
194	0,			/* dma_attr_addr_lo: lowest bus address */
195	0xffffffffull,		/* dma_attr_addr_hi: */
196	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
197	4,			/* dma_attr_align */
198	1,			/* dma_attr_burstsizes. */
199	1,			/* dma_attr_minxfer */
200	0xffffffffull,		/* dma_attr_max xfer including all cookies */
201	0xffffffffull,		/* dma_attr_seg */
202	NV_DMA_NSEGS,		/* dma_attr_sgllen */
203	512,			/* dma_attr_granular */
204	0,			/* dma_attr_flags */
205};
206
207
208/*
209 * DMA attributes for PRD tables
210 */
211ddi_dma_attr_t nv_prd_dma_attr = {
212	DMA_ATTR_V0,		/* dma_attr_version */
213	0,			/* dma_attr_addr_lo */
214	0xffffffffull,		/* dma_attr_addr_hi */
215	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
216	4,			/* dma_attr_align */
217	1,			/* dma_attr_burstsizes */
218	1,			/* dma_attr_minxfer */
219	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
220	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
221	1,			/* dma_attr_sgllen */
222	1,			/* dma_attr_granular */
223	0			/* dma_attr_flags */
224};
225
226/*
227 * Device access attributes
228 */
229static ddi_device_acc_attr_t accattr = {
230    DDI_DEVICE_ATTR_V0,
231    DDI_STRUCTURE_LE_ACC,
232    DDI_STRICTORDER_ACC
233};
234
235
236#ifdef SGPIO_SUPPORT
237static struct cb_ops nv_cb_ops = {
238	nv_open,		/* open */
239	nv_close,		/* close */
240	nodev,			/* strategy (block) */
241	nodev,			/* print (block) */
242	nodev,			/* dump (block) */
243	nodev,			/* read */
244	nodev,			/* write */
245	nv_ioctl,		/* ioctl */
246	nodev,			/* devmap */
247	nodev,			/* mmap */
248	nodev,			/* segmap */
249	nochpoll,		/* chpoll */
250	ddi_prop_op,		/* prop_op */
251	NULL,			/* streams */
252	D_NEW | D_MP |
253	D_64BIT | D_HOTPLUG,	/* flags */
254	CB_REV			/* rev */
255};
256#endif  /* SGPIO_SUPPORT */
257
258
259static struct dev_ops nv_dev_ops = {
260	DEVO_REV,		/* devo_rev */
261	0,			/* refcnt  */
262	nv_getinfo,		/* info */
263	nulldev,		/* identify */
264	nulldev,		/* probe */
265	nv_attach,		/* attach */
266	nv_detach,		/* detach */
267	nodev,			/* no reset */
268#ifdef SGPIO_SUPPORT
269	&nv_cb_ops,		/* driver operations */
270#else
271	(struct cb_ops *)0,	/* driver operations */
272#endif
273	NULL,			/* bus operations */
274	NULL			/* power */
275};
276
277
278/*
279 * Request Sense CDB for ATAPI
280 */
281static const uint8_t nv_rqsense_cdb[16] = {
282	SCMD_REQUEST_SENSE,
283	0,
284	0,
285	0,
286	SATA_ATAPI_MIN_RQSENSE_LEN,
287	0,
288	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
289};
290
291
292static sata_tran_hotplug_ops_t nv_hotplug_ops;
293
294extern struct mod_ops mod_driverops;
295
296static  struct modldrv modldrv = {
297	&mod_driverops,	/* driverops */
298	"Nvidia ck804/mcp55 HBA",
299	&nv_dev_ops,	/* driver ops */
300};
301
302static  struct modlinkage modlinkage = {
303	MODREV_1,
304	&modldrv,
305	NULL
306};
307
308
309/*
310 * wait between checks of reg status
311 */
312int nv_usec_delay = NV_WAIT_REG_CHECK;
313
314/*
315 * The following is needed for nv_vcmn_err()
316 */
317static kmutex_t nv_log_mutex; /* protects nv_log_buf */
318static char nv_log_buf[NV_STRING_512];
319int nv_debug_flags = NVDBG_ALWAYS;
320int nv_log_to_console = B_FALSE;
321
322int nv_log_delay = 0;
323int nv_prom_print = B_FALSE;
324
325/*
326 * for debugging
327 */
328#ifdef DEBUG
329int ncq_commands = 0;
330int non_ncq_commands = 0;
331#endif
332
333/*
334 * Opaque state pointer to be initialized by ddi_soft_state_init()
335 */
336static void *nv_statep	= NULL;
337
338
339static sata_tran_hotplug_ops_t nv_hotplug_ops = {
340	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
341	nv_sata_activate,	/* activate port. cfgadm -c connect */
342	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
343};
344
345
346/*
347 *  nv module initialization
348 */
349int
350_init(void)
351{
352	int	error;
353
354	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
355
356	if (error != 0) {
357
358		return (error);
359	}
360
361	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
362
363	if ((error = sata_hba_init(&modlinkage)) != 0) {
364		ddi_soft_state_fini(&nv_statep);
365		mutex_destroy(&nv_log_mutex);
366
367		return (error);
368	}
369
370	error = mod_install(&modlinkage);
371	if (error != 0) {
372		sata_hba_fini(&modlinkage);
373		ddi_soft_state_fini(&nv_statep);
374		mutex_destroy(&nv_log_mutex);
375
376		return (error);
377	}
378
379	return (error);
380}
381
382
383/*
384 * nv module uninitialize
385 */
386int
387_fini(void)
388{
389	int	error;
390
391	error = mod_remove(&modlinkage);
392
393	if (error != 0) {
394		return (error);
395	}
396
397	/*
398	 * remove the resources allocated in _init()
399	 */
400	mutex_destroy(&nv_log_mutex);
401	sata_hba_fini(&modlinkage);
402	ddi_soft_state_fini(&nv_statep);
403
404	return (error);
405}
406
407
408/*
409 * nv _info entry point
410 */
411int
412_info(struct modinfo *modinfop)
413{
414	return (mod_info(&modlinkage, modinfop));
415}
416
417
418/*
419 * these wrappers for ddi_{get,put}8 are for observability
420 * with dtrace
421 */
422#ifdef DEBUG
423
424static void
425nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
426{
427	ddi_put8(handle, dev_addr, value);
428}
429
430static void
431nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
432{
433	ddi_put32(handle, dev_addr, value);
434}
435
436static uint32_t
437nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
438{
439	return (ddi_get32(handle, dev_addr));
440}
441
442static void
443nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
444{
445	ddi_put16(handle, dev_addr, value);
446}
447
448static uint16_t
449nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
450{
451	return (ddi_get16(handle, dev_addr));
452}
453
454static uint8_t
455nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
456{
457	return (ddi_get8(handle, dev_addr));
458}
459
460#else
461
462#define	nv_put8 ddi_put8
463#define	nv_put32 ddi_put32
464#define	nv_get32 ddi_get32
465#define	nv_put16 ddi_put16
466#define	nv_get16 ddi_get16
467#define	nv_get8 ddi_get8
468
469#endif
470
471
472/*
473 * Driver attach
474 */
475static int
476nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
477{
478	int status, attach_state, intr_types, bar, i, command;
479	int inst = ddi_get_instance(dip);
480	ddi_acc_handle_t pci_conf_handle;
481	nv_ctl_t *nvc;
482	uint8_t subclass;
483	uint32_t reg32;
484#ifdef SGPIO_SUPPORT
485	pci_regspec_t *regs;
486	int rlen;
487#endif
488
489	switch (cmd) {
490
491	case DDI_ATTACH:
492
493		NVLOG((NVDBG_INIT, NULL, NULL,
494		    "nv_attach(): DDI_ATTACH inst %d", inst));
495
496		attach_state = ATTACH_PROGRESS_NONE;
497
498		status = ddi_soft_state_zalloc(nv_statep, inst);
499
500		if (status != DDI_SUCCESS) {
501			break;
502		}
503
504		nvc = ddi_get_soft_state(nv_statep, inst);
505
506		nvc->nvc_dip = dip;
507
508		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
509
510		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
511			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
512			    PCI_CONF_REVID);
513			NVLOG((NVDBG_INIT, NULL, NULL,
514			    "inst %d: silicon revid is %x nv_debug_flags=%x",
515			    inst, nvc->nvc_revid, nv_debug_flags));
516		} else {
517			break;
518		}
519
520		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
521
522		/*
523		 * If a device is attached after a suspend/resume, sometimes
524		 * the command register is zero, as it might not be set by
525		 * BIOS or a parent.  Set it again here.
526		 */
527		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
528
529		if (command == 0) {
530			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
531			    " register", inst);
532			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
533			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
534		}
535
536		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
537
538		if (subclass & PCI_MASS_RAID) {
539			cmn_err(CE_WARN,
540			    "attach failed: RAID mode not supported");
541			break;
542		}
543
544		/*
545		 * the 6 bars of the controller are:
546		 * 0: port 0 task file
547		 * 1: port 0 status
548		 * 2: port 1 task file
549		 * 3: port 1 status
550		 * 4: bus master for both ports
551		 * 5: extended registers for SATA features
552		 */
553		for (bar = 0; bar < 6; bar++) {
554			status = ddi_regs_map_setup(dip, bar + 1,
555			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
556			    &nvc->nvc_bar_hdl[bar]);
557
558			if (status != DDI_SUCCESS) {
559				NVLOG((NVDBG_INIT, nvc, NULL,
560				    "ddi_regs_map_setup failure for bar"
561				    " %d status = %d", bar, status));
562				break;
563			}
564		}
565
566		attach_state |= ATTACH_PROGRESS_BARS;
567
568		/*
569		 * initialize controller and driver core
570		 */
571		status = nv_init_ctl(nvc, pci_conf_handle);
572
573		if (status == NV_FAILURE) {
574			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
575
576			break;
577		}
578
579		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
580
581		/*
582		 * initialize mutexes
583		 */
584		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
585		    DDI_INTR_PRI(nvc->nvc_intr_pri));
586
587		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
588
589		/*
590		 * get supported interrupt types
591		 */
592		if (ddi_intr_get_supported_types(dip, &intr_types) !=
593		    DDI_SUCCESS) {
594			nv_cmn_err(CE_WARN, nvc, NULL,
595			    "!ddi_intr_get_supported_types failed");
596			NVLOG((NVDBG_INIT, nvc, NULL,
597			    "interrupt supported types failed"));
598
599			break;
600		}
601
602		NVLOG((NVDBG_INIT, nvc, NULL,
603		    "ddi_intr_get_supported_types() returned: 0x%x",
604		    intr_types));
605
606#ifdef NV_MSI_SUPPORTED
607		if (intr_types & DDI_INTR_TYPE_MSI) {
608			NVLOG((NVDBG_INIT, nvc, NULL,
609			    "using MSI interrupt type"));
610
611			/*
612			 * Try MSI first, but fall back to legacy if MSI
613			 * attach fails
614			 */
615			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
616				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
617				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
618				NVLOG((NVDBG_INIT, nvc, NULL,
619				    "MSI interrupt setup done"));
620			} else {
621				nv_cmn_err(CE_CONT, nvc, NULL,
622				    "!MSI registration failed "
623				    "will try Legacy interrupts");
624			}
625		}
626#endif
627
628		/*
629		 * Either the MSI interrupt setup has failed or only
630		 * the fixed interrupts are available on the system.
631		 */
632		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
633		    (intr_types & DDI_INTR_TYPE_FIXED)) {
634
635			NVLOG((NVDBG_INIT, nvc, NULL,
636			    "using Legacy interrupt type"));
637
638			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
639				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
640				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
641				NVLOG((NVDBG_INIT, nvc, NULL,
642				    "Legacy interrupt setup done"));
643			} else {
644				nv_cmn_err(CE_WARN, nvc, NULL,
645				    "!legacy interrupt setup failed");
646				NVLOG((NVDBG_INIT, nvc, NULL,
647				    "legacy interrupt setup failed"));
648				break;
649			}
650		}
651
652		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
653			NVLOG((NVDBG_INIT, nvc, NULL,
654			    "no interrupts registered"));
655			break;
656		}
657
658#ifdef SGPIO_SUPPORT
659		/*
660		 * save off the controller number
661		 */
662		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
663		    "reg", (caddr_t)&regs, &rlen);
664		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
665		kmem_free(regs, rlen);
666
667		/*
668		 * initialize SGPIO
669		 */
670		nv_sgp_led_init(nvc, pci_conf_handle);
671#endif	/* SGPIO_SUPPORT */
672
673		/*
674		 * attach to sata module
675		 */
676		if (sata_hba_attach(nvc->nvc_dip,
677		    &nvc->nvc_sata_hba_tran,
678		    DDI_ATTACH) != DDI_SUCCESS) {
679			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
680
681			break;
682		}
683
684		pci_config_teardown(&pci_conf_handle);
685
686		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
687
688		return (DDI_SUCCESS);
689
690	case DDI_RESUME:
691
692		nvc = ddi_get_soft_state(nv_statep, inst);
693
694		NVLOG((NVDBG_INIT, nvc, NULL,
695		    "nv_attach(): DDI_RESUME inst %d", inst));
696
697		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
698			return (DDI_FAILURE);
699		}
700
701		/*
702		 * If a device is attached after a suspend/resume, sometimes
703		 * the command register is zero, as it might not be set by
704		 * BIOS or a parent.  Set it again here.
705		 */
706		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
707
708		if (command == 0) {
709			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
710			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
711		}
712
713		/*
714		 * Need to set bit 2 to 1 at config offset 0x50
715		 * to enable access to the bar5 registers.
716		 */
717		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
718
719		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
720			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
721			    reg32 | NV_BAR5_SPACE_EN);
722		}
723
724		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
725
726		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
727			nv_resume(&(nvc->nvc_port[i]));
728		}
729
730		pci_config_teardown(&pci_conf_handle);
731
732		return (DDI_SUCCESS);
733
734	default:
735		return (DDI_FAILURE);
736	}
737
738
739	/*
740	 * DDI_ATTACH failure path starts here
741	 */
742
743	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
744		nv_rem_intrs(nvc);
745	}
746
747	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
748		/*
749		 * Remove timers
750		 */
751		int port = 0;
752		nv_port_t *nvp;
753
754		for (; port < NV_MAX_PORTS(nvc); port++) {
755			nvp = &(nvc->nvc_port[port]);
756			if (nvp->nvp_timeout_id != 0) {
757				(void) untimeout(nvp->nvp_timeout_id);
758			}
759		}
760	}
761
762	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
763		mutex_destroy(&nvc->nvc_mutex);
764	}
765
766	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
767		nv_uninit_ctl(nvc);
768	}
769
770	if (attach_state & ATTACH_PROGRESS_BARS) {
771		while (--bar >= 0) {
772			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
773		}
774	}
775
776	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
777		ddi_soft_state_free(nv_statep, inst);
778	}
779
780	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
781		pci_config_teardown(&pci_conf_handle);
782	}
783
784	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
785
786	return (DDI_FAILURE);
787}
788
789
790static int
791nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
792{
793	int i, port, inst = ddi_get_instance(dip);
794	nv_ctl_t *nvc;
795	nv_port_t *nvp;
796
797	nvc = ddi_get_soft_state(nv_statep, inst);
798
799	switch (cmd) {
800
801	case DDI_DETACH:
802
803		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
804
805		/*
806		 * Remove interrupts
807		 */
808		nv_rem_intrs(nvc);
809
810		/*
811		 * Remove timers
812		 */
813		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
814			nvp = &(nvc->nvc_port[port]);
815			if (nvp->nvp_timeout_id != 0) {
816				(void) untimeout(nvp->nvp_timeout_id);
817			}
818		}
819
820		/*
821		 * Remove maps
822		 */
823		for (i = 0; i < 6; i++) {
824			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
825		}
826
827		/*
828		 * Destroy mutexes
829		 */
830		mutex_destroy(&nvc->nvc_mutex);
831
832		/*
833		 * Uninitialize the controller
834		 */
835		nv_uninit_ctl(nvc);
836
837#ifdef SGPIO_SUPPORT
838		/*
839		 * release SGPIO resources
840		 */
841		nv_sgp_cleanup(nvc);
842#endif
843
844		/*
845		 * unregister from the sata module
846		 */
847		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
848
849		/*
850		 * Free soft state
851		 */
852		ddi_soft_state_free(nv_statep, inst);
853
854		return (DDI_SUCCESS);
855
856	case DDI_SUSPEND:
857		/*
858		 * The PM functions for suspend and resume are incomplete
859		 * and need additional work.  It may or may not work in
860		 * the current state.
861		 */
862		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
863
864		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
865			nv_suspend(&(nvc->nvc_port[i]));
866		}
867
868		nvc->nvc_state |= NV_CTRL_SUSPEND;
869
870		return (DDI_SUCCESS);
871
872	default:
873		return (DDI_FAILURE);
874	}
875}
876
877
878/*ARGSUSED*/
879static int
880nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
881{
882	nv_ctl_t *nvc;
883	int instance;
884	dev_t dev;
885
886	dev = (dev_t)arg;
887	instance = getminor(dev);
888
889	switch (infocmd) {
890	case DDI_INFO_DEVT2DEVINFO:
891		nvc = ddi_get_soft_state(nv_statep,  instance);
892		if (nvc != NULL) {
893			*result = nvc->nvc_dip;
894			return (DDI_SUCCESS);
895		} else {
896			*result = NULL;
897			return (DDI_FAILURE);
898		}
899	case DDI_INFO_DEVT2INSTANCE:
900		*(int *)result = instance;
901		break;
902	default:
903		break;
904	}
905	return (DDI_SUCCESS);
906}
907
908
909#ifdef SGPIO_SUPPORT
910/* ARGSUSED */
911static int
912nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
913{
914	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
915
916	if (nvc == NULL) {
917		return (ENXIO);
918	}
919
920	return (0);
921}
922
923
924/* ARGSUSED */
925static int
926nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
927{
928	return (0);
929}
930
931
932/* ARGSUSED */
933static int
934nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
935{
936	nv_ctl_t *nvc;
937	int inst;
938	int status;
939	int ctlr, port;
940	int drive;
941	uint8_t curr_led;
942	struct dc_led_ctl led;
943
944	inst = getminor(dev);
945	if (inst == -1) {
946		return (EBADF);
947	}
948
949	nvc = ddi_get_soft_state(nv_statep, inst);
950	if (nvc == NULL) {
951		return (EBADF);
952	}
953
954	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
955		return (EBADF);
956	}
957
958	switch (cmd) {
959	case DEVCTL_SET_LED:
960		status = ddi_copyin((void *)arg, &led,
961		    sizeof (struct dc_led_ctl), mode);
962		if (status != 0)
963			return (EFAULT);
964
965		/*
966		 * Since only the first two controller currently support
967		 * SGPIO (as per NVIDIA docs), this code will as well.
968		 * Note that this validate the port value within led_state
969		 * as well.
970		 */
971
972		ctlr = SGP_DRV_TO_CTLR(led.led_number);
973		if ((ctlr != 0) && (ctlr != 1))
974			return (ENXIO);
975
976		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
977		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
978			return (EINVAL);
979		}
980
981		drive = led.led_number;
982
983		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
984		    (led.led_state == DCL_STATE_OFF)) {
985
986			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
987				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
988			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
989				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
990			} else {
991				return (ENXIO);
992			}
993
994			port = SGP_DRV_TO_PORT(led.led_number);
995			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
996		}
997
998		if (led.led_ctl_active == DCL_CNTRL_ON) {
999			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1000				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1001			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1002				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1003			} else {
1004				return (ENXIO);
1005			}
1006
1007			port = SGP_DRV_TO_PORT(led.led_number);
1008			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1009		}
1010
1011		break;
1012
1013	case DEVCTL_GET_LED:
1014		status = ddi_copyin((void *)arg, &led,
1015		    sizeof (struct dc_led_ctl), mode);
1016		if (status != 0)
1017			return (EFAULT);
1018
1019		/*
1020		 * Since only the first two controller currently support
1021		 * SGPIO (as per NVIDIA docs), this code will as well.
1022		 * Note that this validate the port value within led_state
1023		 * as well.
1024		 */
1025
1026		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1027		if ((ctlr != 0) && (ctlr != 1))
1028			return (ENXIO);
1029
1030		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1031		    led.led_number);
1032
1033		port = SGP_DRV_TO_PORT(led.led_number);
1034		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1035			led.led_ctl_active = DCL_CNTRL_ON;
1036
1037			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1038				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1039					led.led_state = DCL_STATE_OFF;
1040				else
1041					led.led_state = DCL_STATE_ON;
1042			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1043				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1044					led.led_state = DCL_STATE_OFF;
1045				else
1046					led.led_state = DCL_STATE_ON;
1047			} else {
1048				return (ENXIO);
1049			}
1050		} else {
1051			led.led_ctl_active = DCL_CNTRL_OFF;
1052			/*
1053			 * Not really off, but never set and no constant for
1054			 * tri-state
1055			 */
1056			led.led_state = DCL_STATE_OFF;
1057		}
1058
1059		status = ddi_copyout(&led, (void *)arg,
1060		    sizeof (struct dc_led_ctl), mode);
1061		if (status != 0)
1062			return (EFAULT);
1063
1064		break;
1065
1066	case DEVCTL_NUM_LEDS:
1067		led.led_number = SGPIO_DRV_CNT_VALUE;
1068		led.led_ctl_active = 1;
1069		led.led_type = 3;
1070
1071		/*
1072		 * According to documentation, NVIDIA SGPIO is supposed to
1073		 * support blinking, but it does not seem to work in practice.
1074		 */
1075		led.led_state = DCL_STATE_ON;
1076
1077		status = ddi_copyout(&led, (void *)arg,
1078		    sizeof (struct dc_led_ctl), mode);
1079		if (status != 0)
1080			return (EFAULT);
1081
1082		break;
1083
1084	default:
1085		return (EINVAL);
1086	}
1087
1088	return (0);
1089}
1090#endif	/* SGPIO_SUPPORT */
1091
1092
1093/*
1094 * Called by sata module to probe a port.  Port and device state
1095 * are not changed here... only reported back to the sata module.
1096 *
1097 * If probe confirms a device is present for the first time, it will
1098 * initiate a device reset, then probe will be called again and the
1099 * signature will be check.  If the signature is valid, data structures
1100 * will be initialized.
1101 */
1102static int
1103nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1104{
1105	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1106	uint8_t cport = sd->satadev_addr.cport;
1107	uint8_t pmport = sd->satadev_addr.pmport;
1108	uint8_t qual = sd->satadev_addr.qual;
1109	clock_t nv_lbolt = ddi_get_lbolt();
1110	nv_port_t *nvp;
1111
1112	if (cport >= NV_MAX_PORTS(nvc)) {
1113		sd->satadev_type = SATA_DTYPE_NONE;
1114		sd->satadev_state = SATA_STATE_UNKNOWN;
1115
1116		return (SATA_FAILURE);
1117	}
1118
1119	ASSERT(nvc->nvc_port != NULL);
1120	nvp = &(nvc->nvc_port[cport]);
1121	ASSERT(nvp != NULL);
1122
1123	NVLOG((NVDBG_PROBE, nvc, nvp,
1124	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1125	    "qual: 0x%x", cport, pmport, qual));
1126
1127	mutex_enter(&nvp->nvp_mutex);
1128
1129	/*
1130	 * This check seems to be done in the SATA module.
1131	 * It may not be required here
1132	 */
1133	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1134		nv_cmn_err(CE_WARN, nvc, nvp,
1135		    "port inactive.  Use cfgadm to activate");
1136		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1137		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1138		mutex_exit(&nvp->nvp_mutex);
1139
1140		return (SATA_FAILURE);
1141	}
1142
1143	if (qual == SATA_ADDR_PMPORT) {
1144		sd->satadev_type = SATA_DTYPE_NONE;
1145		sd->satadev_state = SATA_STATE_UNKNOWN;
1146		mutex_exit(&nvp->nvp_mutex);
1147		nv_cmn_err(CE_WARN, nvc, nvp,
1148		    "controller does not support port multiplier");
1149
1150		return (SATA_FAILURE);
1151	}
1152
1153	sd->satadev_state = SATA_PSTATE_PWRON;
1154
1155	nv_copy_registers(nvp, sd, NULL);
1156
1157	/*
1158	 * determine link status
1159	 */
1160	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1161		uint8_t det;
1162
1163		/*
1164		 * Reset will cause the link to go down for a short period of
1165		 * time.  If link is lost for less than 2 seconds ignore it
1166		 * so that the reset can progress.
1167		 */
1168		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1169
1170			if (nvp->nvp_link_lost_time == 0) {
1171				nvp->nvp_link_lost_time = nv_lbolt;
1172			}
1173
1174			if (TICK_TO_SEC(nv_lbolt -
1175			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1176				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1177				    "probe: intermittent link lost while"
1178				    " resetting"));
1179				/*
1180				 * fake status of link so that probe continues
1181				 */
1182				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1183				    SSTATUS_IPM_ACTIVE);
1184				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1185				    SSTATUS_DET_DEVPRE_PHYCOM);
1186				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1187				mutex_exit(&nvp->nvp_mutex);
1188
1189				return (SATA_SUCCESS);
1190			} else {
1191				nvp->nvp_state &=
1192				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1193			}
1194		}
1195
1196		/*
1197		 * no link, so tear down port and abort all active packets
1198		 */
1199
1200		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1201		    SSTATUS_DET_SHIFT;
1202
1203		switch (det) {
1204		case SSTATUS_DET_NODEV:
1205		case SSTATUS_DET_PHYOFFLINE:
1206			sd->satadev_type = SATA_DTYPE_NONE;
1207			break;
1208		default:
1209			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1210			break;
1211		}
1212
1213		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1214		    "probe: link lost invoking nv_abort_active"));
1215
1216		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1217		nv_uninit_port(nvp);
1218
1219		mutex_exit(&nvp->nvp_mutex);
1220
1221		return (SATA_SUCCESS);
1222	} else {
1223		nvp->nvp_link_lost_time = 0;
1224	}
1225
1226	/*
1227	 * A device is present so clear hotremoved flag
1228	 */
1229	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1230
1231#ifdef SGPIO_SUPPORT
1232	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1233	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1234#endif
1235
1236	/*
1237	 * If the signature was acquired previously there is no need to
1238	 * do it again.
1239	 */
1240	if (nvp->nvp_signature != 0) {
1241		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1242		    "probe: signature acquired previously"));
1243		sd->satadev_type = nvp->nvp_type;
1244		mutex_exit(&nvp->nvp_mutex);
1245
1246		return (SATA_SUCCESS);
1247	}
1248
1249	/*
1250	 * If NV_PORT_RESET is not set, this is the first time through
1251	 * so perform reset and return.
1252	 */
1253	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1254		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1255		    "probe: first reset to get sig"));
1256		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1257		nv_reset(nvp);
1258		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1259		nvp->nvp_probe_time = nv_lbolt;
1260		mutex_exit(&nvp->nvp_mutex);
1261
1262		return (SATA_SUCCESS);
1263	}
1264
1265	/*
1266	 * Reset was done previously.  see if the signature is
1267	 * available.
1268	 */
1269	nv_read_signature(nvp);
1270	sd->satadev_type = nvp->nvp_type;
1271
1272	/*
1273	 * Some drives may require additional resets to get a
1274	 * valid signature.  If a drive was not just powered up, the signature
1275	 * should arrive within half a second of reset.  Therefore if more
1276	 * than 5 seconds has elapsed while waiting for a signature, reset
1277	 * again.  These extra resets do not appear to create problems when
1278	 * the drive is spinning up for more than this reset period.
1279	 */
1280	if (nvp->nvp_signature == 0) {
1281		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1282			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1283			    " during signature acquisition"));
1284			nv_reset(nvp);
1285		}
1286
1287		mutex_exit(&nvp->nvp_mutex);
1288
1289		return (SATA_SUCCESS);
1290	}
1291
1292	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1293	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1294
1295	/*
1296	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1297	 * it is not either of those, then just return.
1298	 */
1299	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1300	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1301		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1302		    " disks/CDs/DVDs.  Signature acquired was %X",
1303		    nvp->nvp_signature));
1304		mutex_exit(&nvp->nvp_mutex);
1305
1306		return (SATA_SUCCESS);
1307	}
1308
1309	/*
1310	 * make sure structures are initialized
1311	 */
1312	if (nv_init_port(nvp) == NV_SUCCESS) {
1313		NVLOG((NVDBG_PROBE, nvc, nvp,
1314		    "device detected and set up at port %d", cport));
1315		mutex_exit(&nvp->nvp_mutex);
1316
1317		return (SATA_SUCCESS);
1318	} else {
1319		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1320		    "structures for port %d", cport);
1321		mutex_exit(&nvp->nvp_mutex);
1322
1323		return (SATA_FAILURE);
1324	}
1325	/*NOTREACHED*/
1326}
1327
1328
1329/*
1330 * Called by sata module to start a new command.
1331 */
1332static int
1333nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1334{
1335	int cport = spkt->satapkt_device.satadev_addr.cport;
1336	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1337	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1338	int ret;
1339
1340	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1341	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1342
1343	mutex_enter(&nvp->nvp_mutex);
1344
1345	/*
1346	 * hotremoved is an intermediate state where the link was lost,
1347	 * but the hotplug event has not yet been processed by the sata
1348	 * module.  Fail the request.
1349	 */
1350	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1351		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1352		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1353		NVLOG((NVDBG_ERRS, nvc, nvp,
1354		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1355		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1356		mutex_exit(&nvp->nvp_mutex);
1357
1358		return (SATA_TRAN_PORT_ERROR);
1359	}
1360
1361	if (nvp->nvp_state & NV_PORT_RESET) {
1362		NVLOG((NVDBG_ERRS, nvc, nvp,
1363		    "still waiting for reset completion"));
1364		spkt->satapkt_reason = SATA_PKT_BUSY;
1365		mutex_exit(&nvp->nvp_mutex);
1366
1367		/*
1368		 * If in panic, timeouts do not occur, so fake one
1369		 * so that the signature can be acquired to complete
1370		 * the reset handling.
1371		 */
1372		if (ddi_in_panic()) {
1373			nv_timeout(nvp);
1374		}
1375
1376		return (SATA_TRAN_BUSY);
1377	}
1378
1379	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1380		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1381		NVLOG((NVDBG_ERRS, nvc, nvp,
1382		    "nv_sata_start: SATA_DTYPE_NONE"));
1383		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1384		mutex_exit(&nvp->nvp_mutex);
1385
1386		return (SATA_TRAN_PORT_ERROR);
1387	}
1388
1389	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1390		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1391		nv_cmn_err(CE_WARN, nvc, nvp,
1392		    "port multipliers not supported by controller");
1393		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1394		mutex_exit(&nvp->nvp_mutex);
1395
1396		return (SATA_TRAN_CMD_UNSUPPORTED);
1397	}
1398
1399	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1400		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1401		NVLOG((NVDBG_ERRS, nvc, nvp,
1402		    "nv_sata_start: port not yet initialized"));
1403		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1404		mutex_exit(&nvp->nvp_mutex);
1405
1406		return (SATA_TRAN_PORT_ERROR);
1407	}
1408
1409	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1410		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1411		NVLOG((NVDBG_ERRS, nvc, nvp,
1412		    "nv_sata_start: NV_PORT_INACTIVE"));
1413		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1414		mutex_exit(&nvp->nvp_mutex);
1415
1416		return (SATA_TRAN_PORT_ERROR);
1417	}
1418
1419	if (nvp->nvp_state & NV_PORT_FAILED) {
1420		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1421		NVLOG((NVDBG_ERRS, nvc, nvp,
1422		    "nv_sata_start: NV_PORT_FAILED state"));
1423		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1424		mutex_exit(&nvp->nvp_mutex);
1425
1426		return (SATA_TRAN_PORT_ERROR);
1427	}
1428
1429	/*
1430	 * after a device reset, and then when sata module restore processing
1431	 * is complete, the sata module will set sata_clear_dev_reset which
1432	 * indicates that restore processing has completed and normal
1433	 * non-restore related commands should be processed.
1434	 */
1435	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1436		nvp->nvp_state &= ~NV_PORT_RESTORE;
1437		NVLOG((NVDBG_ENTRY, nvc, nvp,
1438		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1439	}
1440
1441	/*
1442	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1443	 * only allow commands which restore device state.  The sata module
1444	 * marks such commands with with sata_ignore_dev_reset.
1445	 *
1446	 * during coredump, nv_reset is called and but then the restore
1447	 * doesn't happen.  For now, workaround by ignoring the wait for
1448	 * restore if the system is panicing.
1449	 */
1450	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1451	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1452	    (ddi_in_panic() == 0)) {
1453		spkt->satapkt_reason = SATA_PKT_BUSY;
1454		NVLOG((NVDBG_ENTRY, nvc, nvp,
1455		    "nv_sata_start: waiting for restore "));
1456		mutex_exit(&nvp->nvp_mutex);
1457
1458		return (SATA_TRAN_BUSY);
1459	}
1460
1461	if (nvp->nvp_state & NV_PORT_ABORTING) {
1462		spkt->satapkt_reason = SATA_PKT_BUSY;
1463		NVLOG((NVDBG_ERRS, nvc, nvp,
1464		    "nv_sata_start: NV_PORT_ABORTING"));
1465		mutex_exit(&nvp->nvp_mutex);
1466
1467		return (SATA_TRAN_BUSY);
1468	}
1469
1470	if (spkt->satapkt_op_mode &
1471	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1472
1473		ret = nv_start_sync(nvp, spkt);
1474
1475		mutex_exit(&nvp->nvp_mutex);
1476
1477		return (ret);
1478	}
1479
1480	/*
1481	 * start command asynchronous command
1482	 */
1483	ret = nv_start_async(nvp, spkt);
1484
1485	mutex_exit(&nvp->nvp_mutex);
1486
1487	return (ret);
1488}
1489
1490
1491/*
1492 * SATA_OPMODE_POLLING implies the driver is in a
1493 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1494 * If only SATA_OPMODE_SYNCH is set, the driver can use
1495 * interrupts and sleep wait on a cv.
1496 *
1497 * If SATA_OPMODE_POLLING is set, the driver can't use
1498 * interrupts and must busy wait and simulate the
1499 * interrupts by waiting for BSY to be cleared.
1500 *
1501 * Synchronous mode has to return BUSY if there are
1502 * any other commands already on the drive.
1503 */
1504static int
1505nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1506{
1507	nv_ctl_t *nvc = nvp->nvp_ctlp;
1508	int ret;
1509
1510	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1511
1512	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1513		spkt->satapkt_reason = SATA_PKT_BUSY;
1514		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1515		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1516		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1517		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1518		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1519
1520		return (SATA_TRAN_BUSY);
1521	}
1522
1523	/*
1524	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1525	 */
1526	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1527	    servicing_interrupt()) {
1528		spkt->satapkt_reason = SATA_PKT_BUSY;
1529		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1530		    "SYNC mode not allowed during interrupt"));
1531
1532		return (SATA_TRAN_BUSY);
1533
1534	}
1535
1536	/*
1537	 * disable interrupt generation if in polled mode
1538	 */
1539	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1540		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1541	}
1542
1543	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1544		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1545			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1546		}
1547
1548		return (ret);
1549	}
1550
1551	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1552		mutex_exit(&nvp->nvp_mutex);
1553		ret = nv_poll_wait(nvp, spkt);
1554		mutex_enter(&nvp->nvp_mutex);
1555
1556		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1557
1558		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1559		    " done % reason %d", ret));
1560
1561		return (ret);
1562	}
1563
1564	/*
1565	 * non-polling synchronous mode handling.  The interrupt will signal
1566	 * when the IO is completed.
1567	 */
1568	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1569
1570	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1571
1572		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1573	}
1574
1575	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1576	    " done % reason %d", spkt->satapkt_reason));
1577
1578	return (SATA_TRAN_ACCEPTED);
1579}
1580
1581
1582static int
1583nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1584{
1585	int ret;
1586	nv_ctl_t *nvc = nvp->nvp_ctlp;
1587#if ! defined(__lock_lint)
1588	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1589#endif
1590
1591	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1592
1593	for (;;) {
1594
1595		NV_DELAY_NSEC(400);
1596
1597		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1598		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1599		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1600			mutex_enter(&nvp->nvp_mutex);
1601			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1602			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1603			nv_reset(nvp);
1604			nv_complete_io(nvp, spkt, 0);
1605			mutex_exit(&nvp->nvp_mutex);
1606			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1607			    "SATA_STATUS_BSY"));
1608
1609			return (SATA_TRAN_ACCEPTED);
1610		}
1611
1612		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1613
1614		/*
1615		 * Simulate interrupt.
1616		 */
1617		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1618		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1619
1620		if (ret != DDI_INTR_CLAIMED) {
1621			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1622			    " unclaimed -- resetting"));
1623			mutex_enter(&nvp->nvp_mutex);
1624			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1625			nv_reset(nvp);
1626			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1627			nv_complete_io(nvp, spkt, 0);
1628			mutex_exit(&nvp->nvp_mutex);
1629
1630			return (SATA_TRAN_ACCEPTED);
1631		}
1632
1633#if ! defined(__lock_lint)
1634		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1635			/*
1636			 * packet is complete
1637			 */
1638			return (SATA_TRAN_ACCEPTED);
1639		}
1640#endif
1641	}
1642	/*NOTREACHED*/
1643}
1644
1645
1646/*
1647 * Called by sata module to abort outstanding packets.
1648 */
1649/*ARGSUSED*/
1650static int
1651nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1652{
1653	int cport = spkt->satapkt_device.satadev_addr.cport;
1654	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1655	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1656	int c_a, ret;
1657
1658	ASSERT(cport < NV_MAX_PORTS(nvc));
1659	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1660
1661	mutex_enter(&nvp->nvp_mutex);
1662
1663	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1664		mutex_exit(&nvp->nvp_mutex);
1665		nv_cmn_err(CE_WARN, nvc, nvp,
1666		    "abort request failed: port inactive");
1667
1668		return (SATA_FAILURE);
1669	}
1670
1671	/*
1672	 * spkt == NULL then abort all commands
1673	 */
1674	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1675
1676	if (c_a) {
1677		NVLOG((NVDBG_ENTRY, nvc, nvp,
1678		    "packets aborted running=%d", c_a));
1679		ret = SATA_SUCCESS;
1680	} else {
1681		if (spkt == NULL) {
1682			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1683		} else {
1684			NVLOG((NVDBG_ENTRY, nvc, nvp,
1685			    "can't find spkt to abort"));
1686		}
1687		ret = SATA_FAILURE;
1688	}
1689
1690	mutex_exit(&nvp->nvp_mutex);
1691
1692	return (ret);
1693}
1694
1695
1696/*
1697 * if spkt == NULL abort all pkts running, otherwise
1698 * abort the requested packet.  must be called with nv_mutex
1699 * held and returns with it held.  Not NCQ aware.
1700 */
1701static int
1702nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1703{
1704	int aborted = 0, i, reset_once = B_FALSE;
1705	struct nv_slot *nv_slotp;
1706	sata_pkt_t *spkt_slot;
1707
1708	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1709
1710	/*
1711	 * return if the port is not configured
1712	 */
1713	if (nvp->nvp_slot == NULL) {
1714		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1715		    "nv_abort_active: not configured so returning"));
1716
1717		return (0);
1718	}
1719
1720	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1721
1722	nvp->nvp_state |= NV_PORT_ABORTING;
1723
1724	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1725
1726		nv_slotp = &(nvp->nvp_slot[i]);
1727		spkt_slot = nv_slotp->nvslot_spkt;
1728
1729		/*
1730		 * skip if not active command in slot
1731		 */
1732		if (spkt_slot == NULL) {
1733			continue;
1734		}
1735
1736		/*
1737		 * if a specific packet was requested, skip if
1738		 * this is not a match
1739		 */
1740		if ((spkt != NULL) && (spkt != spkt_slot)) {
1741			continue;
1742		}
1743
1744		/*
1745		 * stop the hardware.  This could need reworking
1746		 * when NCQ is enabled in the driver.
1747		 */
1748		if (reset_once == B_FALSE) {
1749			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1750
1751			/*
1752			 * stop DMA engine
1753			 */
1754			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1755
1756			nv_reset(nvp);
1757			reset_once = B_TRUE;
1758		}
1759
1760		spkt_slot->satapkt_reason = abort_reason;
1761		nv_complete_io(nvp, spkt_slot, i);
1762		aborted++;
1763	}
1764
1765	nvp->nvp_state &= ~NV_PORT_ABORTING;
1766
1767	return (aborted);
1768}
1769
1770
1771/*
1772 * Called by sata module to reset a port, device, or the controller.
1773 */
1774static int
1775nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1776{
1777	int cport = sd->satadev_addr.cport;
1778	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1779	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1780	int ret = SATA_SUCCESS;
1781
1782	ASSERT(cport < NV_MAX_PORTS(nvc));
1783
1784	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1785
1786	mutex_enter(&nvp->nvp_mutex);
1787
1788	switch (sd->satadev_addr.qual) {
1789
1790	case SATA_ADDR_CPORT:
1791		/*FALLTHROUGH*/
1792	case SATA_ADDR_DCPORT:
1793		nv_reset(nvp);
1794		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1795
1796		break;
1797	case SATA_ADDR_CNTRL:
1798		NVLOG((NVDBG_ENTRY, nvc, nvp,
1799		    "nv_sata_reset: constroller reset not supported"));
1800
1801		break;
1802	case SATA_ADDR_PMPORT:
1803	case SATA_ADDR_DPMPORT:
1804		NVLOG((NVDBG_ENTRY, nvc, nvp,
1805		    "nv_sata_reset: port multipliers not supported"));
1806		/*FALLTHROUGH*/
1807	default:
1808		/*
1809		 * unsupported case
1810		 */
1811		ret = SATA_FAILURE;
1812		break;
1813	}
1814
1815	if (ret == SATA_SUCCESS) {
1816		/*
1817		 * If the port is inactive, do a quiet reset and don't attempt
1818		 * to wait for reset completion or do any post reset processing
1819		 */
1820		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1821			nvp->nvp_state &= ~NV_PORT_RESET;
1822			nvp->nvp_reset_time = 0;
1823		}
1824
1825		/*
1826		 * clear the port failed flag
1827		 */
1828		nvp->nvp_state &= ~NV_PORT_FAILED;
1829	}
1830
1831	mutex_exit(&nvp->nvp_mutex);
1832
1833	return (ret);
1834}
1835
1836
1837/*
1838 * Sata entry point to handle port activation.  cfgadm -c connect
1839 */
1840static int
1841nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1842{
1843	int cport = sd->satadev_addr.cport;
1844	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1845	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1846
1847	ASSERT(cport < NV_MAX_PORTS(nvc));
1848	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1849
1850	mutex_enter(&nvp->nvp_mutex);
1851
1852	sd->satadev_state = SATA_STATE_READY;
1853
1854	nv_copy_registers(nvp, sd, NULL);
1855
1856	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1857
1858	nvp->nvp_state = 0;
1859
1860	mutex_exit(&nvp->nvp_mutex);
1861
1862	return (SATA_SUCCESS);
1863}
1864
1865
1866/*
1867 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1868 */
1869static int
1870nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1871{
1872	int cport = sd->satadev_addr.cport;
1873	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1874	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1875
1876	ASSERT(cport < NV_MAX_PORTS(nvc));
1877	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1878
1879	mutex_enter(&nvp->nvp_mutex);
1880
1881	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1882
1883	/*
1884	 * mark the device as inaccessible
1885	 */
1886	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1887
1888	/*
1889	 * disable the interrupts on port
1890	 */
1891	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1892
1893	nv_uninit_port(nvp);
1894
1895	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1896	nv_copy_registers(nvp, sd, NULL);
1897
1898	mutex_exit(&nvp->nvp_mutex);
1899
1900	return (SATA_SUCCESS);
1901}
1902
1903
1904/*
1905 * find an empty slot in the driver's queue, increment counters,
1906 * and then invoke the appropriate PIO or DMA start routine.
1907 */
1908static int
1909nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1910{
1911	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1912	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1913	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1914	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1915	nv_ctl_t *nvc = nvp->nvp_ctlp;
1916	nv_slot_t *nv_slotp;
1917	boolean_t dma_cmd;
1918
1919	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1920	    sata_cmdp->satacmd_cmd_reg));
1921
1922	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1923	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1924		nvp->nvp_ncq_run++;
1925		/*
1926		 * search for an empty NCQ slot.  by the time, it's already
1927		 * been determined by the caller that there is room on the
1928		 * queue.
1929		 */
1930		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1931		    on_bit <<= 1) {
1932			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1933				break;
1934			}
1935		}
1936
1937		/*
1938		 * the first empty slot found, should not exceed the queue
1939		 * depth of the drive.  if it does it's an error.
1940		 */
1941		ASSERT(slot != nvp->nvp_queue_depth);
1942
1943		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1944		    nvp->nvp_sactive);
1945		ASSERT((sactive & on_bit) == 0);
1946		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1947		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1948		    on_bit));
1949		nvp->nvp_sactive_cache |= on_bit;
1950
1951		ncq = NVSLOT_NCQ;
1952
1953	} else {
1954		nvp->nvp_non_ncq_run++;
1955		slot = 0;
1956	}
1957
1958	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1959
1960	ASSERT(nv_slotp->nvslot_spkt == NULL);
1961
1962	nv_slotp->nvslot_spkt = spkt;
1963	nv_slotp->nvslot_flags = ncq;
1964
1965	/*
1966	 * the sata module doesn't indicate which commands utilize the
1967	 * DMA engine, so find out using this switch table.
1968	 */
1969	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1970	case SATAC_READ_DMA_EXT:
1971	case SATAC_WRITE_DMA_EXT:
1972	case SATAC_WRITE_DMA:
1973	case SATAC_READ_DMA:
1974	case SATAC_READ_DMA_QUEUED:
1975	case SATAC_READ_DMA_QUEUED_EXT:
1976	case SATAC_WRITE_DMA_QUEUED:
1977	case SATAC_WRITE_DMA_QUEUED_EXT:
1978	case SATAC_READ_FPDMA_QUEUED:
1979	case SATAC_WRITE_FPDMA_QUEUED:
1980		dma_cmd = B_TRUE;
1981		break;
1982	default:
1983		dma_cmd = B_FALSE;
1984	}
1985
1986	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1987		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1988		nv_slotp->nvslot_start = nv_start_dma;
1989		nv_slotp->nvslot_intr = nv_intr_dma;
1990	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1991		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1992		nv_slotp->nvslot_start = nv_start_pkt_pio;
1993		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1994		if ((direction == SATA_DIR_READ) ||
1995		    (direction == SATA_DIR_WRITE)) {
1996			nv_slotp->nvslot_byte_count =
1997			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1998			nv_slotp->nvslot_v_addr =
1999			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2000			/*
2001			 * Freeing DMA resources allocated by the framework
2002			 * now to avoid buffer overwrite (dma sync) problems
2003			 * when the buffer is released at command completion.
2004			 * Primarily an issue on systems with more than
2005			 * 4GB of memory.
2006			 */
2007			sata_free_dma_resources(spkt);
2008		}
2009	} else if (direction == SATA_DIR_NODATA_XFER) {
2010		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2011		nv_slotp->nvslot_start = nv_start_nodata;
2012		nv_slotp->nvslot_intr = nv_intr_nodata;
2013	} else if (direction == SATA_DIR_READ) {
2014		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2015		nv_slotp->nvslot_start = nv_start_pio_in;
2016		nv_slotp->nvslot_intr = nv_intr_pio_in;
2017		nv_slotp->nvslot_byte_count =
2018		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2019		nv_slotp->nvslot_v_addr =
2020		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2021		/*
2022		 * Freeing DMA resources allocated by the framework now to
2023		 * avoid buffer overwrite (dma sync) problems when the buffer
2024		 * is released at command completion.  This is not an issue
2025		 * for write because write does not update the buffer.
2026		 * Primarily an issue on systems with more than 4GB of memory.
2027		 */
2028		sata_free_dma_resources(spkt);
2029	} else if (direction == SATA_DIR_WRITE) {
2030		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2031		nv_slotp->nvslot_start = nv_start_pio_out;
2032		nv_slotp->nvslot_intr = nv_intr_pio_out;
2033		nv_slotp->nvslot_byte_count =
2034		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2035		nv_slotp->nvslot_v_addr =
2036		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2037	} else {
2038		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2039		    " %d cookies %d cmd %x",
2040		    sata_cmdp->satacmd_flags.sata_data_direction,
2041		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2042		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2043		ret = SATA_TRAN_CMD_UNSUPPORTED;
2044
2045		goto fail;
2046	}
2047
2048	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2049	    SATA_TRAN_ACCEPTED) {
2050#ifdef SGPIO_SUPPORT
2051		nv_sgp_drive_active(nvp->nvp_ctlp,
2052		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2053#endif
2054		nv_slotp->nvslot_stime = ddi_get_lbolt();
2055
2056		/*
2057		 * start timer if it's not already running and this packet
2058		 * is not requesting polled mode.
2059		 */
2060		if ((nvp->nvp_timeout_id == 0) &&
2061		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2062			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2063			    drv_usectohz(NV_ONE_SEC));
2064		}
2065
2066		return (SATA_TRAN_ACCEPTED);
2067	}
2068
2069	fail:
2070
2071	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2072
2073	if (ncq == NVSLOT_NCQ) {
2074		nvp->nvp_ncq_run--;
2075		nvp->nvp_sactive_cache &= ~on_bit;
2076	} else {
2077		nvp->nvp_non_ncq_run--;
2078	}
2079	nv_slotp->nvslot_spkt = NULL;
2080	nv_slotp->nvslot_flags = 0;
2081
2082	return (ret);
2083}
2084
2085
2086/*
2087 * Check if the signature is ready and if non-zero translate
2088 * it into a solaris sata defined type.
2089 */
2090static void
2091nv_read_signature(nv_port_t *nvp)
2092{
2093	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2094
2095	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2096	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2097	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2098	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2099
2100	switch (nvp->nvp_signature) {
2101
2102	case NV_SIG_DISK:
2103		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2104		nvp->nvp_type = SATA_DTYPE_ATADISK;
2105		break;
2106	case NV_SIG_ATAPI:
2107		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2108		    "drive is an optical device"));
2109		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2110		break;
2111	case NV_SIG_PM:
2112		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2113		    "device is a port multiplier"));
2114		nvp->nvp_type = SATA_DTYPE_PMULT;
2115		break;
2116	case NV_SIG_NOTREADY:
2117		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2118		    "signature not ready"));
2119		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2120		break;
2121	default:
2122		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2123		    " recognized", nvp->nvp_signature);
2124		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2125		break;
2126	}
2127
2128	if (nvp->nvp_signature) {
2129		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2130	}
2131}
2132
2133
2134/*
2135 * Reset the port
2136 */
2137static void
2138nv_reset(nv_port_t *nvp)
2139{
2140	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2141	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2142	nv_ctl_t *nvc = nvp->nvp_ctlp;
2143	uint32_t sctrl;
2144
2145	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2146
2147	ASSERT(mutex_owned(&nvp->nvp_mutex));
2148
2149	/*
2150	 * clear signature registers
2151	 */
2152	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2153	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2154	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2155	nv_put8(cmdhdl, nvp->nvp_count, 0);
2156
2157	nvp->nvp_signature = 0;
2158	nvp->nvp_type = 0;
2159	nvp->nvp_state |= NV_PORT_RESET;
2160	nvp->nvp_reset_time = ddi_get_lbolt();
2161	nvp->nvp_link_lost_time = 0;
2162
2163	/*
2164	 * assert reset in PHY by writing a 1 to bit 0 scontrol
2165	 */
2166	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2167
2168	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2169
2170	/*
2171	 * wait 1ms
2172	 */
2173	drv_usecwait(1000);
2174
2175	/*
2176	 * de-assert reset in PHY
2177	 */
2178	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2179
2180	/*
2181	 * make sure timer is running
2182	 */
2183	if (nvp->nvp_timeout_id == 0) {
2184		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2185		    drv_usectohz(NV_ONE_SEC));
2186	}
2187}
2188
2189
2190/*
2191 * Initialize register handling specific to mcp55
2192 */
2193/* ARGSUSED */
2194static void
2195mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2196{
2197	nv_port_t *nvp;
2198	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2199	uint8_t off, port;
2200
2201	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
2202	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
2203
2204	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2205		nvp = &(nvc->nvc_port[port]);
2206		nvp->nvp_mcp55_int_status =
2207		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
2208		nvp->nvp_mcp55_int_ctl =
2209		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
2210
2211		/*
2212		 * clear any previous interrupts asserted
2213		 */
2214		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
2215		    MCP55_INT_CLEAR);
2216
2217		/*
2218		 * These are the interrupts to accept for now.  The spec
2219		 * says these are enable bits, but nvidia has indicated
2220		 * these are masking bits.  Even though they may be masked
2221		 * out to prevent asserting the main interrupt, they can
2222		 * still be asserted while reading the interrupt status
2223		 * register, so that needs to be considered in the interrupt
2224		 * handler.
2225		 */
2226		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
2227		    ~(MCP55_INT_IGNORE));
2228	}
2229
2230	/*
2231	 * Allow the driver to program the BM on the first command instead
2232	 * of waiting for an interrupt.
2233	 */
2234#ifdef NCQ
2235	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2236	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
2237	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2238	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
2239#endif
2240
2241
2242#if 0
2243	/*
2244	 * This caused problems on some but not all mcp55 based systems.
2245	 * DMA writes would never complete.  This happens even on small
2246	 * mem systems, and only setting NV_40BIT_PRD below and not
2247	 * buffer_dma_attr.dma_attr_addr_hi, so it seems to be a hardware
2248	 * issue that needs further investigation.
2249	 */
2250
2251	/*
2252	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2253	 * Enable DMA to take advantage of that.
2254	 *
2255	 */
2256	if (nvc->nvc_revid >= 0xa3) {
2257		uint32_t reg32;
2258		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
2259		    " is capable of 40-bit addressing", nvc->nvc_revid));
2260		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
2261		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2262		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2263		    reg32 |NV_40BIT_PRD);
2264	} else {
2265		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
2266		    "not capable of 40-bit addressing", nvc->nvc_revid));
2267	}
2268#endif
2269
2270}
2271
2272
2273/*
2274 * Initialize register handling specific to mcp04
2275 */
2276static void
2277mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2278{
2279	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2280	uint32_t reg32;
2281	uint16_t reg16;
2282	nv_port_t *nvp;
2283	int j;
2284
2285	/*
2286	 * delay hotplug interrupts until PHYRDY.
2287	 */
2288	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2289	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2290	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
2291
2292	/*
2293	 * enable hot plug interrupts for channel x and y
2294	 */
2295	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2296	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2297	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2298	    NV_HIRQ_EN | reg16);
2299
2300
2301	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2302	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2303	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2304	    NV_HIRQ_EN | reg16);
2305
2306	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
2307
2308	/*
2309	 * clear any existing interrupt pending then enable
2310	 */
2311	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2312		nvp = &(nvc->nvc_port[j]);
2313		mutex_enter(&nvp->nvp_mutex);
2314		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2315		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2316		mutex_exit(&nvp->nvp_mutex);
2317	}
2318}
2319
2320
2321/*
2322 * Initialize the controller and set up driver data structures.
2323 * determine if ck804 or mcp55 class.
2324 */
2325static int
2326nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2327{
2328	struct sata_hba_tran stran;
2329	nv_port_t *nvp;
2330	int j, ck804;
2331	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2332	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2333	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2334	uint32_t reg32;
2335	uint8_t reg8, reg8_save;
2336
2337	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2338
2339	ck804 = B_TRUE;
2340#ifdef SGPIO_SUPPORT
2341	nvc->nvc_mcp55_flag = B_FALSE;
2342#endif
2343
2344	/*
2345	 * Need to set bit 2 to 1 at config offset 0x50
2346	 * to enable access to the bar5 registers.
2347	 */
2348	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2349	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2350	    reg32 | NV_BAR5_SPACE_EN);
2351
2352	/*
2353	 * Determine if this is ck804 or mcp55.  ck804 will map in the
2354	 * task file registers into bar5 while mcp55 won't.  The offset of
2355	 * the task file registers in mcp55's space is unused, so it will
2356	 * return zero.  So check one of the task file registers to see if it is
2357	 * writable and reads back what was written.  If it's mcp55 it will
2358	 * return back 0xff whereas ck804 will return the value written.
2359	 */
2360	reg8_save = nv_get8(bar5_hdl,
2361	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2362
2363
2364	for (j = 1; j < 3; j++) {
2365
2366		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2367		reg8 = nv_get8(bar5_hdl,
2368		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2369
2370		if (reg8 != j) {
2371			ck804 = B_FALSE;
2372			nvc->nvc_mcp55_flag = B_TRUE;
2373			break;
2374		}
2375	}
2376
2377	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2378
2379	if (ck804 == B_TRUE) {
2380		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2381		nvc->nvc_interrupt = mcp04_intr;
2382		nvc->nvc_reg_init = mcp04_reg_init;
2383		nvc->nvc_set_intr = mcp04_set_intr;
2384	} else {
2385		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2386		nvc->nvc_interrupt = mcp55_intr;
2387		nvc->nvc_reg_init = mcp55_reg_init;
2388		nvc->nvc_set_intr = mcp55_set_intr;
2389	}
2390
2391
2392	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2393	stran.sata_tran_hba_dip = nvc->nvc_dip;
2394	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2395	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2396	stran.sata_tran_hba_features_support =
2397	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2398	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2399	stran.sata_tran_probe_port = nv_sata_probe;
2400	stran.sata_tran_start = nv_sata_start;
2401	stran.sata_tran_abort = nv_sata_abort;
2402	stran.sata_tran_reset_dport = nv_sata_reset;
2403	stran.sata_tran_selftest = NULL;
2404	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2405	stran.sata_tran_pwrmgt_ops = NULL;
2406	stran.sata_tran_ioctl = NULL;
2407	nvc->nvc_sata_hba_tran = stran;
2408
2409	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2410	    KM_SLEEP);
2411
2412	/*
2413	 * initialize registers common to all chipsets
2414	 */
2415	nv_common_reg_init(nvc);
2416
2417	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2418		nvp = &(nvc->nvc_port[j]);
2419
2420		cmd_addr = nvp->nvp_cmd_addr;
2421		ctl_addr = nvp->nvp_ctl_addr;
2422		bm_addr = nvp->nvp_bm_addr;
2423
2424		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2425		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2426
2427		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2428
2429		nvp->nvp_data	= cmd_addr + NV_DATA;
2430		nvp->nvp_error	= cmd_addr + NV_ERROR;
2431		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2432		nvp->nvp_count	= cmd_addr + NV_COUNT;
2433		nvp->nvp_sect	= cmd_addr + NV_SECT;
2434		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2435		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2436		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2437		nvp->nvp_status	= cmd_addr + NV_STATUS;
2438		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2439		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2440		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2441
2442		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2443		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2444		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2445
2446		nvp->nvp_state = 0;
2447	}
2448
2449	/*
2450	 * initialize register by calling chip specific reg initialization
2451	 */
2452	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2453
2454	return (NV_SUCCESS);
2455}
2456
2457
2458/*
2459 * Initialize data structures with enough slots to handle queuing, if
2460 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2461 * NCQ support is built into the driver and enabled.  It might have been
2462 * better to derive the true size from the drive itself, but the sata
2463 * module only sends down that information on the first NCQ command,
2464 * which means possibly re-sizing the structures on an interrupt stack,
2465 * making error handling more messy.  The easy way is to just allocate
2466 * all 32 slots, which is what most drives support anyway.
2467 */
2468static int
2469nv_init_port(nv_port_t *nvp)
2470{
2471	nv_ctl_t *nvc = nvp->nvp_ctlp;
2472	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2473	dev_info_t *dip = nvc->nvc_dip;
2474	ddi_device_acc_attr_t dev_attr;
2475	size_t buf_size;
2476	ddi_dma_cookie_t cookie;
2477	uint_t count;
2478	int rc, i;
2479
2480	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2481	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2482	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2483
2484	if (nvp->nvp_state & NV_PORT_INIT) {
2485		NVLOG((NVDBG_INIT, nvc, nvp,
2486		    "nv_init_port previously initialized"));
2487
2488		return (NV_SUCCESS);
2489	} else {
2490		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2491	}
2492
2493	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2494	    NV_QUEUE_SLOTS, KM_SLEEP);
2495
2496	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2497	    NV_QUEUE_SLOTS, KM_SLEEP);
2498
2499	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2500	    NV_QUEUE_SLOTS, KM_SLEEP);
2501
2502	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2503	    NV_QUEUE_SLOTS, KM_SLEEP);
2504
2505	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2506	    KM_SLEEP);
2507
2508	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2509
2510		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2511		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2512
2513		if (rc != DDI_SUCCESS) {
2514			nv_uninit_port(nvp);
2515
2516			return (NV_FAILURE);
2517		}
2518
2519		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2520		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2521		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2522		    &(nvp->nvp_sg_acc_hdl[i]));
2523
2524		if (rc != DDI_SUCCESS) {
2525			nv_uninit_port(nvp);
2526
2527			return (NV_FAILURE);
2528		}
2529
2530		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2531		    nvp->nvp_sg_addr[i], buf_size,
2532		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2533		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2534
2535		if (rc != DDI_DMA_MAPPED) {
2536			nv_uninit_port(nvp);
2537
2538			return (NV_FAILURE);
2539		}
2540
2541		ASSERT(count == 1);
2542		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2543
2544		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2545
2546		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2547	}
2548
2549	/*
2550	 * nvp_queue_depth represents the actual drive queue depth, not the
2551	 * number of slots allocated in the structures (which may be more).
2552	 * Actual queue depth is only learned after the first NCQ command, so
2553	 * initialize it to 1 for now.
2554	 */
2555	nvp->nvp_queue_depth = 1;
2556
2557	nvp->nvp_state |= NV_PORT_INIT;
2558
2559	return (NV_SUCCESS);
2560}
2561
2562
2563/*
2564 * Free dynamically allocated structures for port.
2565 */
2566static void
2567nv_uninit_port(nv_port_t *nvp)
2568{
2569	int i;
2570
2571	/*
2572	 * It is possible to reach here before a port has been initialized or
2573	 * after it has already been uninitialized.  Just return in that case.
2574	 */
2575	if (nvp->nvp_slot == NULL) {
2576
2577		return;
2578	}
2579
2580	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2581	    "nv_uninit_port uninitializing"));
2582
2583	nvp->nvp_type = SATA_DTYPE_NONE;
2584
2585	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2586		if (nvp->nvp_sg_paddr[i]) {
2587			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2588		}
2589
2590		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2591			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2592		}
2593
2594		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2595			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2596		}
2597	}
2598
2599	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2600	nvp->nvp_slot = NULL;
2601
2602	kmem_free(nvp->nvp_sg_dma_hdl,
2603	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2604	nvp->nvp_sg_dma_hdl = NULL;
2605
2606	kmem_free(nvp->nvp_sg_acc_hdl,
2607	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2608	nvp->nvp_sg_acc_hdl = NULL;
2609
2610	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2611	nvp->nvp_sg_addr = NULL;
2612
2613	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2614	nvp->nvp_sg_paddr = NULL;
2615
2616	nvp->nvp_state &= ~NV_PORT_INIT;
2617	nvp->nvp_signature = 0;
2618}
2619
2620
2621/*
2622 * Cache register offsets and access handles to frequently accessed registers
2623 * which are common to either chipset.
2624 */
2625static void
2626nv_common_reg_init(nv_ctl_t *nvc)
2627{
2628	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2629	uchar_t *bm_addr_offset, *sreg_offset;
2630	uint8_t bar, port;
2631	nv_port_t *nvp;
2632
2633	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2634		if (port == 0) {
2635			bar = NV_BAR_0;
2636			bm_addr_offset = 0;
2637			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2638		} else {
2639			bar = NV_BAR_2;
2640			bm_addr_offset = (uchar_t *)8;
2641			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2642		}
2643
2644		nvp = &(nvc->nvc_port[port]);
2645		nvp->nvp_ctlp = nvc;
2646		nvp->nvp_port_num = port;
2647		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2648
2649		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2650		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2651		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2652		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2653		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2654		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2655		    (long)bm_addr_offset;
2656
2657		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2658		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2659		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2660		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2661	}
2662}
2663
2664
2665static void
2666nv_uninit_ctl(nv_ctl_t *nvc)
2667{
2668	int port;
2669	nv_port_t *nvp;
2670
2671	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2672
2673	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2674		nvp = &(nvc->nvc_port[port]);
2675		mutex_enter(&nvp->nvp_mutex);
2676		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2677		nv_uninit_port(nvp);
2678		mutex_exit(&nvp->nvp_mutex);
2679		mutex_destroy(&nvp->nvp_mutex);
2680		cv_destroy(&nvp->nvp_poll_cv);
2681	}
2682
2683	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2684	nvc->nvc_port = NULL;
2685}
2686
2687
2688/*
2689 * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2690 * that interrupts from other devices can be disregarded while dtracing.
2691 */
2692/* ARGSUSED */
2693static uint_t
2694mcp04_intr(caddr_t arg1, caddr_t arg2)
2695{
2696	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2697	uint8_t intr_status;
2698	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2699
2700	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2701
2702	if (intr_status == 0) {
2703
2704		return (DDI_INTR_UNCLAIMED);
2705	}
2706
2707	mcp04_intr_process(nvc, intr_status);
2708
2709	return (DDI_INTR_CLAIMED);
2710}
2711
2712
2713/*
2714 * Main interrupt handler for ck804.  handles normal device
2715 * interrupts as well as port hot plug and remove interrupts.
2716 *
2717 */
2718static void
2719mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2720{
2721
2722	int port, i;
2723	nv_port_t *nvp;
2724	nv_slot_t *nv_slotp;
2725	uchar_t	status;
2726	sata_pkt_t *spkt;
2727	uint8_t bmstatus, clear_bits;
2728	ddi_acc_handle_t bmhdl;
2729	int nvcleared = 0;
2730	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2731	uint32_t sstatus;
2732	int port_mask_hot[] = {
2733		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2734	};
2735	int port_mask_pm[] = {
2736		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2737	};
2738
2739	NVLOG((NVDBG_INTR, nvc, NULL,
2740	    "mcp04_intr_process entered intr_status=%x", intr_status));
2741
2742	/*
2743	 * For command completion interrupt, explicit clear is not required.
2744	 * however, for the error cases explicit clear is performed.
2745	 */
2746	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2747
2748		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2749
2750		if ((port_mask[port] & intr_status) == 0) {
2751			continue;
2752		}
2753
2754		NVLOG((NVDBG_INTR, nvc, NULL,
2755		    "mcp04_intr_process interrupt on port %d", port));
2756
2757		nvp = &(nvc->nvc_port[port]);
2758
2759		mutex_enter(&nvp->nvp_mutex);
2760
2761		/*
2762		 * there was a corner case found where an interrupt
2763		 * arrived before nvp_slot was set.  Should
2764		 * probably should track down why that happens and try
2765		 * to eliminate that source and then get rid of this
2766		 * check.
2767		 */
2768		if (nvp->nvp_slot == NULL) {
2769			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2770			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2771			    "received before initialization "
2772			    "completed status=%x", status));
2773			mutex_exit(&nvp->nvp_mutex);
2774
2775			/*
2776			 * clear interrupt bits
2777			 */
2778			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2779			    port_mask[port]);
2780
2781			continue;
2782		}
2783
2784		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2785			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2786			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2787			    " no command in progress status=%x", status));
2788			mutex_exit(&nvp->nvp_mutex);
2789
2790			/*
2791			 * clear interrupt bits
2792			 */
2793			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2794			    port_mask[port]);
2795
2796			continue;
2797		}
2798
2799		bmhdl = nvp->nvp_bm_hdl;
2800		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2801
2802		if (!(bmstatus & BMISX_IDEINTS)) {
2803			mutex_exit(&nvp->nvp_mutex);
2804
2805			continue;
2806		}
2807
2808		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2809
2810		if (status & SATA_STATUS_BSY) {
2811			mutex_exit(&nvp->nvp_mutex);
2812
2813			continue;
2814		}
2815
2816		nv_slotp = &(nvp->nvp_slot[0]);
2817
2818		ASSERT(nv_slotp);
2819
2820		spkt = nv_slotp->nvslot_spkt;
2821
2822		if (spkt == NULL) {
2823			mutex_exit(&nvp->nvp_mutex);
2824
2825			continue;
2826		}
2827
2828		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2829
2830		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2831
2832		/*
2833		 * If there is no link cannot be certain about the completion
2834		 * of the packet, so abort it.
2835		 */
2836		if (nv_check_link((&spkt->satapkt_device)->
2837		    satadev_scr.sstatus) == B_FALSE) {
2838
2839			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2840
2841		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2842
2843			nv_complete_io(nvp, spkt, 0);
2844		}
2845
2846		mutex_exit(&nvp->nvp_mutex);
2847	}
2848
2849	/*
2850	 * mcp04 often doesn't correctly distinguish hot add/remove
2851	 * interrupts.  Frequently both the ADD and the REMOVE bits
2852	 * are asserted, whether it was a remove or add.  Use sstatus
2853	 * to distinguish hot add from hot remove.
2854	 */
2855
2856	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2857		clear_bits = 0;
2858
2859		nvp = &(nvc->nvc_port[port]);
2860		mutex_enter(&nvp->nvp_mutex);
2861
2862		if ((port_mask_pm[port] & intr_status) != 0) {
2863			clear_bits = port_mask_pm[port];
2864			NVLOG((NVDBG_HOT, nvc, nvp,
2865			    "clearing PM interrupt bit: %x",
2866			    intr_status & port_mask_pm[port]));
2867		}
2868
2869		if ((port_mask_hot[port] & intr_status) == 0) {
2870			if (clear_bits != 0) {
2871				goto clear;
2872			} else {
2873				mutex_exit(&nvp->nvp_mutex);
2874				continue;
2875			}
2876		}
2877
2878		/*
2879		 * reaching here means there was a hot add or remove.
2880		 */
2881		clear_bits |= port_mask_hot[port];
2882
2883		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2884
2885		sstatus = nv_get32(bar5_hdl,
2886		    nvc->nvc_port[port].nvp_sstatus);
2887
2888		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2889		    SSTATUS_DET_DEVPRE_PHYCOM) {
2890			nv_report_add_remove(nvp, 0);
2891		} else {
2892			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2893		}
2894	clear:
2895		/*
2896		 * clear interrupt bits.  explicit interrupt clear is
2897		 * required for hotplug interrupts.
2898		 */
2899		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2900
2901		/*
2902		 * make sure it's flushed and cleared.  If not try
2903		 * again.  Sometimes it has been observed to not clear
2904		 * on the first try.
2905		 */
2906		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2907
2908		/*
2909		 * make 10 additional attempts to clear the interrupt
2910		 */
2911		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2912			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2913			    "still not clear try=%d", intr_status,
2914			    ++nvcleared));
2915			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2916			    clear_bits);
2917			intr_status = nv_get8(bar5_hdl,
2918			    nvc->nvc_mcp04_int_status);
2919		}
2920
2921		/*
2922		 * if still not clear, log a message and disable the
2923		 * port. highly unlikely that this path is taken, but it
2924		 * gives protection against a wedged interrupt.
2925		 */
2926		if (intr_status & clear_bits) {
2927			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2928			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2929			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2930			nvp->nvp_state |= NV_PORT_FAILED;
2931			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2932			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2933			    "interrupt.  disabling port intr_status=%X",
2934			    intr_status);
2935		}
2936
2937		mutex_exit(&nvp->nvp_mutex);
2938	}
2939}
2940
2941
2942/*
2943 * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2944 * on the controller, to handle completion and hot plug and remove events.
2945 *
2946 */
2947static uint_t
2948mcp55_intr_port(nv_port_t *nvp)
2949{
2950	nv_ctl_t *nvc = nvp->nvp_ctlp;
2951	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2952	uint8_t clear = 0, intr_cycles = 0;
2953	int ret = DDI_INTR_UNCLAIMED;
2954	uint16_t int_status;
2955
2956	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2957
2958	for (;;) {
2959		/*
2960		 * read current interrupt status
2961		 */
2962		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2963
2964		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2965
2966		/*
2967		 * MCP55_INT_IGNORE interrupts will show up in the status,
2968		 * but are masked out from causing an interrupt to be generated
2969		 * to the processor.  Ignore them here by masking them out.
2970		 */
2971		int_status &= ~(MCP55_INT_IGNORE);
2972
2973		/*
2974		 * exit the loop when no more interrupts to process
2975		 */
2976		if (int_status == 0) {
2977
2978			break;
2979		}
2980
2981		if (int_status & MCP55_INT_COMPLETE) {
2982			NVLOG((NVDBG_INTR, nvc, nvp,
2983			    "mcp55_packet_complete_intr"));
2984			/*
2985			 * since int_status was set, return DDI_INTR_CLAIMED
2986			 * from the DDI's perspective even though the packet
2987			 * completion may not have succeeded.  If it fails,
2988			 * need to manually clear the interrupt, otherwise
2989			 * clearing is implicit.
2990			 */
2991			ret = DDI_INTR_CLAIMED;
2992			if (mcp55_packet_complete_intr(nvc, nvp) ==
2993			    NV_FAILURE) {
2994				clear = MCP55_INT_COMPLETE;
2995			} else {
2996				intr_cycles = 0;
2997			}
2998		}
2999
3000		if (int_status & MCP55_INT_DMA_SETUP) {
3001			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
3002
3003			/*
3004			 * Needs to be cleared before starting the BM, so do it
3005			 * now.  make sure this is still working.
3006			 */
3007			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
3008			    MCP55_INT_DMA_SETUP);
3009#ifdef NCQ
3010			ret = mcp55_dma_setup_intr(nvc, nvp);
3011#endif
3012		}
3013
3014		if (int_status & MCP55_INT_REM) {
3015			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
3016			clear = MCP55_INT_REM;
3017			ret = DDI_INTR_CLAIMED;
3018
3019			mutex_enter(&nvp->nvp_mutex);
3020			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3021			mutex_exit(&nvp->nvp_mutex);
3022
3023		} else if (int_status & MCP55_INT_ADD) {
3024			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
3025			clear = MCP55_INT_ADD;
3026			ret = DDI_INTR_CLAIMED;
3027
3028			mutex_enter(&nvp->nvp_mutex);
3029			nv_report_add_remove(nvp, 0);
3030			mutex_exit(&nvp->nvp_mutex);
3031		}
3032
3033		if (clear) {
3034			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
3035			clear = 0;
3036		}
3037
3038		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3039			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3040			    "processing.  Disabling port int_status=%X"
3041			    " clear=%X", int_status, clear);
3042			mutex_enter(&nvp->nvp_mutex);
3043			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3044			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3045			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3046			nvp->nvp_state |= NV_PORT_FAILED;
3047			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3048			mutex_exit(&nvp->nvp_mutex);
3049		}
3050	}
3051
3052	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
3053
3054	return (ret);
3055}
3056
3057
3058/* ARGSUSED */
3059static uint_t
3060mcp55_intr(caddr_t arg1, caddr_t arg2)
3061{
3062	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3063	int ret;
3064
3065	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
3066	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
3067
3068	return (ret);
3069}
3070
3071
3072#ifdef NCQ
3073/*
3074 * with software driven NCQ on mcp55, an interrupt occurs right
3075 * before the drive is ready to do a DMA transfer.  At this point,
3076 * the PRD table needs to be programmed and the DMA engine enabled
3077 * and ready to go.
3078 *
3079 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3080 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3081 * -- clear bit 0 of master command reg
3082 * -- program PRD
3083 * -- clear the interrupt status bit for the DMA Setup FIS
3084 * -- set bit 0 of the bus master command register
3085 */
3086static int
3087mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3088{
3089	int slot;
3090	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3091	uint8_t bmicx;
3092	int port = nvp->nvp_port_num;
3093	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3094	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3095
3096	nv_cmn_err(CE_PANIC, nvc, nvp,
3097	    "this is should not be executed at all until NCQ");
3098
3099	mutex_enter(&nvp->nvp_mutex);
3100
3101	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
3102
3103	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3104
3105	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
3106	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3107
3108	/*
3109	 * halt the DMA engine.  This step is necessary according to
3110	 * the mcp55 spec, probably since there may have been a "first" packet
3111	 * that already programmed the DMA engine, but may not turn out to
3112	 * be the first one processed.
3113	 */
3114	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3115
3116#if 0
3117	if (bmicx & BMICX_SSBM) {
3118		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3119		    "another packet.  Cancelling and reprogramming"));
3120		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3121	}
3122#endif
3123	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3124
3125	nv_start_dma_engine(nvp, slot);
3126
3127	mutex_exit(&nvp->nvp_mutex);
3128
3129	return (DDI_INTR_CLAIMED);
3130}
3131#endif /* NCQ */
3132
3133
3134/*
3135 * packet completion interrupt.  If the packet is complete, invoke
3136 * the packet completion callback.
3137 */
3138static int
3139mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3140{
3141	uint8_t status, bmstatus;
3142	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3143	int sactive;
3144	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3145	sata_pkt_t *spkt;
3146	nv_slot_t *nv_slotp;
3147
3148	mutex_enter(&nvp->nvp_mutex);
3149
3150	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3151
3152	if (!(bmstatus & BMISX_IDEINTS)) {
3153		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3154		mutex_exit(&nvp->nvp_mutex);
3155
3156		return (NV_FAILURE);
3157	}
3158
3159	/*
3160	 * If the just completed item is a non-ncq command, the busy
3161	 * bit should not be set
3162	 */
3163	if (nvp->nvp_non_ncq_run) {
3164		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3165		if (status & SATA_STATUS_BSY) {
3166			nv_cmn_err(CE_WARN, nvc, nvp,
3167			    "unexpected SATA_STATUS_BSY set");
3168			mutex_exit(&nvp->nvp_mutex);
3169			/*
3170			 * calling function will clear interrupt.  then
3171			 * the real interrupt will either arrive or the
3172			 * packet timeout handling will take over and
3173			 * reset.
3174			 */
3175			return (NV_FAILURE);
3176		}
3177
3178	} else {
3179		/*
3180		 * NCQ check for BSY here and wait if still bsy before
3181		 * continuing. Rather than wait for it to be cleared
3182		 * when starting a packet and wasting CPU time, the starting
3183		 * thread can exit immediate, but might have to spin here
3184		 * for a bit possibly.  Needs more work and experimentation.
3185		 */
3186		ASSERT(nvp->nvp_ncq_run);
3187	}
3188
3189
3190	if (nvp->nvp_ncq_run) {
3191		ncq_command = B_TRUE;
3192		ASSERT(nvp->nvp_non_ncq_run == 0);
3193	} else {
3194		ASSERT(nvp->nvp_non_ncq_run != 0);
3195	}
3196
3197	/*
3198	 * active_pkt_bit will represent the bitmap of the single completed
3199	 * packet.  Because of the nature of sw assisted NCQ, only one
3200	 * command will complete per interrupt.
3201	 */
3202
3203	if (ncq_command == B_FALSE) {
3204		active_pkt = 0;
3205	} else {
3206		/*
3207		 * NCQ: determine which command just completed, by examining
3208		 * which bit cleared in the register since last written.
3209		 */
3210		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3211
3212		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3213
3214		ASSERT(active_pkt_bit);
3215
3216
3217		/*
3218		 * this failure path needs more work to handle the
3219		 * error condition and recovery.
3220		 */
3221		if (active_pkt_bit == 0) {
3222			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3223
3224			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3225			    "nvp->nvp_sactive %X", sactive,
3226			    nvp->nvp_sactive_cache);
3227
3228			(void) nv_get8(cmdhdl, nvp->nvp_status);
3229
3230			mutex_exit(&nvp->nvp_mutex);
3231
3232			return (NV_FAILURE);
3233		}
3234
3235		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3236		    active_pkt++, active_pkt_bit >>= 1) {
3237		}
3238
3239		/*
3240		 * make sure only one bit is ever turned on
3241		 */
3242		ASSERT(active_pkt_bit == 1);
3243
3244		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3245	}
3246
3247	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3248
3249	spkt = nv_slotp->nvslot_spkt;
3250
3251	ASSERT(spkt != NULL);
3252
3253	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3254
3255	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3256
3257	/*
3258	 * If there is no link cannot be certain about the completion
3259	 * of the packet, so abort it.
3260	 */
3261	if (nv_check_link((&spkt->satapkt_device)->
3262	    satadev_scr.sstatus) == B_FALSE) {
3263		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3264
3265	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3266
3267		nv_complete_io(nvp, spkt, active_pkt);
3268	}
3269
3270	mutex_exit(&nvp->nvp_mutex);
3271
3272	return (NV_SUCCESS);
3273}
3274
3275
3276static void
3277nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3278{
3279
3280	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3281
3282	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3283		nvp->nvp_ncq_run--;
3284	} else {
3285		nvp->nvp_non_ncq_run--;
3286	}
3287
3288	/*
3289	 * mark the packet slot idle so it can be reused.  Do this before
3290	 * calling satapkt_comp so the slot can be reused.
3291	 */
3292	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3293
3294	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3295		/*
3296		 * If this is not timed polled mode cmd, which has an
3297		 * active thread monitoring for completion, then need
3298		 * to signal the sleeping thread that the cmd is complete.
3299		 */
3300		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3301			cv_signal(&nvp->nvp_poll_cv);
3302		}
3303
3304		return;
3305	}
3306
3307	if (spkt->satapkt_comp != NULL) {
3308		mutex_exit(&nvp->nvp_mutex);
3309		(*spkt->satapkt_comp)(spkt);
3310		mutex_enter(&nvp->nvp_mutex);
3311	}
3312}
3313
3314
3315/*
3316 * check whether packet is ncq command or not.  for ncq command,
3317 * start it if there is still room on queue.  for non-ncq command only
3318 * start if no other command is running.
3319 */
3320static int
3321nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3322{
3323	uint8_t cmd, ncq;
3324
3325	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3326
3327	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3328
3329	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3330	    (cmd == SATAC_READ_FPDMA_QUEUED));
3331
3332	if (ncq == B_FALSE) {
3333
3334		if ((nvp->nvp_non_ncq_run == 1) ||
3335		    (nvp->nvp_ncq_run > 0)) {
3336			/*
3337			 * next command is non-ncq which can't run
3338			 * concurrently.  exit and return queue full.
3339			 */
3340			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3341
3342			return (SATA_TRAN_QUEUE_FULL);
3343		}
3344
3345		return (nv_start_common(nvp, spkt));
3346	}
3347
3348	/*
3349	 * ncq == B_TRUE
3350	 */
3351	if (nvp->nvp_non_ncq_run == 1) {
3352		/*
3353		 * cannot start any NCQ commands when there
3354		 * is a non-NCQ command running.
3355		 */
3356		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3357
3358		return (SATA_TRAN_QUEUE_FULL);
3359	}
3360
3361#ifdef NCQ
3362	/*
3363	 * this is not compiled for now as satapkt_device.satadev_qdepth
3364	 * is being pulled out until NCQ support is later addressed
3365	 *
3366	 * nvp_queue_depth is initialized by the first NCQ command
3367	 * received.
3368	 */
3369	if (nvp->nvp_queue_depth == 1) {
3370		nvp->nvp_queue_depth =
3371		    spkt->satapkt_device.satadev_qdepth;
3372
3373		ASSERT(nvp->nvp_queue_depth > 1);
3374
3375		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3376		    "nv_process_queue: nvp_queue_depth set to %d",
3377		    nvp->nvp_queue_depth));
3378	}
3379#endif
3380
3381	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3382		/*
3383		 * max number of NCQ commands already active
3384		 */
3385		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3386
3387		return (SATA_TRAN_QUEUE_FULL);
3388	}
3389
3390	return (nv_start_common(nvp, spkt));
3391}
3392
3393
3394/*
3395 * configure INTx and legacy interrupts
3396 */
3397static int
3398nv_add_legacy_intrs(nv_ctl_t *nvc)
3399{
3400	dev_info_t	*devinfo = nvc->nvc_dip;
3401	int		actual, count = 0;
3402	int		x, y, rc, inum = 0;
3403
3404	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3405
3406	/*
3407	 * get number of interrupts
3408	 */
3409	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3410	if ((rc != DDI_SUCCESS) || (count == 0)) {
3411		NVLOG((NVDBG_INTR, nvc, NULL,
3412		    "ddi_intr_get_nintrs() failed, "
3413		    "rc %d count %d", rc, count));
3414
3415		return (DDI_FAILURE);
3416	}
3417
3418	/*
3419	 * allocate an array of interrupt handles
3420	 */
3421	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3422	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3423
3424	/*
3425	 * call ddi_intr_alloc()
3426	 */
3427	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3428	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3429
3430	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3431		nv_cmn_err(CE_WARN, nvc, NULL,
3432		    "ddi_intr_alloc() failed, rc %d", rc);
3433		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3434
3435		return (DDI_FAILURE);
3436	}
3437
3438	if (actual < count) {
3439		nv_cmn_err(CE_WARN, nvc, NULL,
3440		    "ddi_intr_alloc: requested: %d, received: %d",
3441		    count, actual);
3442
3443		goto failure;
3444	}
3445
3446	nvc->nvc_intr_cnt = actual;
3447
3448	/*
3449	 * get intr priority
3450	 */
3451	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3452	    DDI_SUCCESS) {
3453		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3454
3455		goto failure;
3456	}
3457
3458	/*
3459	 * Test for high level mutex
3460	 */
3461	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3462		nv_cmn_err(CE_WARN, nvc, NULL,
3463		    "nv_add_legacy_intrs: high level intr not supported");
3464
3465		goto failure;
3466	}
3467
3468	for (x = 0; x < actual; x++) {
3469		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3470		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3471			nv_cmn_err(CE_WARN, nvc, NULL,
3472			    "ddi_intr_add_handler() failed");
3473
3474			goto failure;
3475		}
3476	}
3477
3478	/*
3479	 * call ddi_intr_enable() for legacy interrupts
3480	 */
3481	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3482		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3483	}
3484
3485	return (DDI_SUCCESS);
3486
3487	failure:
3488	/*
3489	 * free allocated intr and nvc_htable
3490	 */
3491	for (y = 0; y < actual; y++) {
3492		(void) ddi_intr_free(nvc->nvc_htable[y]);
3493	}
3494
3495	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3496
3497	return (DDI_FAILURE);
3498}
3499
3500#ifdef	NV_MSI_SUPPORTED
3501/*
3502 * configure MSI interrupts
3503 */
3504static int
3505nv_add_msi_intrs(nv_ctl_t *nvc)
3506{
3507	dev_info_t	*devinfo = nvc->nvc_dip;
3508	int		count, avail, actual;
3509	int		x, y, rc, inum = 0;
3510
3511	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3512
3513	/*
3514	 * get number of interrupts
3515	 */
3516	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3517	if ((rc != DDI_SUCCESS) || (count == 0)) {
3518		nv_cmn_err(CE_WARN, nvc, NULL,
3519		    "ddi_intr_get_nintrs() failed, "
3520		    "rc %d count %d", rc, count);
3521
3522		return (DDI_FAILURE);
3523	}
3524
3525	/*
3526	 * get number of available interrupts
3527	 */
3528	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3529	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3530		nv_cmn_err(CE_WARN, nvc, NULL,
3531		    "ddi_intr_get_navail() failed, "
3532		    "rc %d avail %d", rc, avail);
3533
3534		return (DDI_FAILURE);
3535	}
3536
3537	if (avail < count) {
3538		nv_cmn_err(CE_WARN, nvc, NULL,
3539		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3540		    avail, count);
3541	}
3542
3543	/*
3544	 * allocate an array of interrupt handles
3545	 */
3546	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3547	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3548
3549	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3550	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3551
3552	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3553		nv_cmn_err(CE_WARN, nvc, NULL,
3554		    "ddi_intr_alloc() failed, rc %d", rc);
3555		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3556
3557		return (DDI_FAILURE);
3558	}
3559
3560	/*
3561	 * Use interrupt count returned or abort?
3562	 */
3563	if (actual < count) {
3564		NVLOG((NVDBG_INIT, nvc, NULL,
3565		    "Requested: %d, Received: %d", count, actual));
3566	}
3567
3568	nvc->nvc_intr_cnt = actual;
3569
3570	/*
3571	 * get priority for first msi, assume remaining are all the same
3572	 */
3573	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3574	    DDI_SUCCESS) {
3575		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3576
3577		goto failure;
3578	}
3579
3580	/*
3581	 * test for high level mutex
3582	 */
3583	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3584		nv_cmn_err(CE_WARN, nvc, NULL,
3585		    "nv_add_msi_intrs: high level intr not supported");
3586
3587		goto failure;
3588	}
3589
3590	/*
3591	 * Call ddi_intr_add_handler()
3592	 */
3593	for (x = 0; x < actual; x++) {
3594		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3595		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3596			nv_cmn_err(CE_WARN, nvc, NULL,
3597			    "ddi_intr_add_handler() failed");
3598
3599			goto failure;
3600		}
3601	}
3602
3603	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3604
3605	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3606		(void) ddi_intr_block_enable(nvc->nvc_htable,
3607		    nvc->nvc_intr_cnt);
3608	} else {
3609		/*
3610		 * Call ddi_intr_enable() for MSI non block enable
3611		 */
3612		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3613			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3614		}
3615	}
3616
3617	return (DDI_SUCCESS);
3618
3619	failure:
3620	/*
3621	 * free allocated intr and nvc_htable
3622	 */
3623	for (y = 0; y < actual; y++) {
3624		(void) ddi_intr_free(nvc->nvc_htable[y]);
3625	}
3626
3627	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3628
3629	return (DDI_FAILURE);
3630}
3631#endif
3632
3633
3634static void
3635nv_rem_intrs(nv_ctl_t *nvc)
3636{
3637	int x, i;
3638	nv_port_t *nvp;
3639
3640	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3641
3642	/*
3643	 * prevent controller from generating interrupts by
3644	 * masking them out.  This is an extra precaution.
3645	 */
3646	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3647		nvp = (&nvc->nvc_port[i]);
3648		mutex_enter(&nvp->nvp_mutex);
3649		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3650		mutex_exit(&nvp->nvp_mutex);
3651	}
3652
3653	/*
3654	 * disable all interrupts
3655	 */
3656	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3657	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3658		(void) ddi_intr_block_disable(nvc->nvc_htable,
3659		    nvc->nvc_intr_cnt);
3660	} else {
3661		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3662			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3663		}
3664	}
3665
3666	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3667		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3668		(void) ddi_intr_free(nvc->nvc_htable[x]);
3669	}
3670
3671	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3672}
3673
3674
3675/*
3676 * variable argument wrapper for cmn_err.  prefixes the instance and port
3677 * number if possible
3678 */
3679static void
3680nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3681{
3682	char port[NV_STRING_10];
3683	char inst[NV_STRING_10];
3684
3685	mutex_enter(&nv_log_mutex);
3686
3687	if (nvc) {
3688		(void) snprintf(inst, NV_STRING_10, "inst %d",
3689		    ddi_get_instance(nvc->nvc_dip));
3690	} else {
3691		inst[0] = '\0';
3692	}
3693
3694	if (nvp) {
3695		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3696	} else {
3697		port[0] = '\0';
3698	}
3699
3700	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3701	    (inst[0]|port[0] ? ": " :""));
3702
3703	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3704	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3705
3706	/*
3707	 * normally set to log to console but in some debug situations it
3708	 * may be useful to log only to a file.
3709	 */
3710	if (nv_log_to_console) {
3711		if (nv_prom_print) {
3712			prom_printf("%s\n", nv_log_buf);
3713		} else {
3714			cmn_err(ce, "%s", nv_log_buf);
3715		}
3716
3717
3718	} else {
3719		cmn_err(ce, "!%s", nv_log_buf);
3720	}
3721
3722	mutex_exit(&nv_log_mutex);
3723}
3724
3725
3726/*
3727 * wrapper for cmn_err
3728 */
3729static void
3730nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3731{
3732	va_list ap;
3733
3734	va_start(ap, fmt);
3735	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3736	va_end(ap);
3737}
3738
3739
3740#if defined(DEBUG)
3741/*
3742 * prefixes the instance and port number if possible to the debug message
3743 */
3744static void
3745nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3746{
3747	va_list ap;
3748
3749	if ((nv_debug_flags & flag) == 0) {
3750		return;
3751	}
3752
3753	va_start(ap, fmt);
3754	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3755	va_end(ap);
3756
3757	/*
3758	 * useful for some debugging situations
3759	 */
3760	if (nv_log_delay) {
3761		drv_usecwait(nv_log_delay);
3762	}
3763
3764}
3765#endif /* DEBUG */
3766
3767
3768/*
3769 * program registers which are common to all commands
3770 */
3771static void
3772nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3773{
3774	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3775	sata_pkt_t *spkt;
3776	sata_cmd_t *satacmd;
3777	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3778	uint8_t cmd, ncq = B_FALSE;
3779
3780	spkt = nv_slotp->nvslot_spkt;
3781	satacmd = &spkt->satapkt_cmd;
3782	cmd = satacmd->satacmd_cmd_reg;
3783
3784	ASSERT(nvp->nvp_slot);
3785
3786	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3787	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3788		ncq = B_TRUE;
3789	}
3790
3791	/*
3792	 * select the drive
3793	 */
3794	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3795
3796	/*
3797	 * make certain the drive selected
3798	 */
3799	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3800	    NV_SEC2USEC(5), 0) == B_FALSE) {
3801
3802		return;
3803	}
3804
3805	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3806
3807	case ATA_ADDR_LBA:
3808		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3809
3810		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3811		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3812		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3813		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3814
3815		break;
3816
3817	case ATA_ADDR_LBA28:
3818		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3819		    "ATA_ADDR_LBA28 mode"));
3820		/*
3821		 * NCQ only uses 48-bit addressing
3822		 */
3823		ASSERT(ncq != B_TRUE);
3824
3825		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3826		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3827		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3828		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3829
3830		break;
3831
3832	case ATA_ADDR_LBA48:
3833		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3834		    "ATA_ADDR_LBA48 mode"));
3835
3836		/*
3837		 * for NCQ, tag goes into count register and real sector count
3838		 * into features register.  The sata module does the translation
3839		 * in the satacmd.
3840		 */
3841		if (ncq == B_TRUE) {
3842			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3843			nv_put8(cmdhdl, nvp->nvp_feature,
3844			    satacmd->satacmd_features_reg_ext);
3845			nv_put8(cmdhdl, nvp->nvp_feature,
3846			    satacmd->satacmd_features_reg);
3847		} else {
3848			nv_put8(cmdhdl, nvp->nvp_count,
3849			    satacmd->satacmd_sec_count_msb);
3850			nv_put8(cmdhdl, nvp->nvp_count,
3851			    satacmd->satacmd_sec_count_lsb);
3852		}
3853
3854		/*
3855		 * send the high-order half first
3856		 */
3857		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3858		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3859		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3860		/*
3861		 * Send the low-order half
3862		 */
3863		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3864		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3865		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3866
3867		break;
3868
3869	case 0:
3870		/*
3871		 * non-media access commands such as identify and features
3872		 * take this path.
3873		 */
3874		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3875		nv_put8(cmdhdl, nvp->nvp_feature,
3876		    satacmd->satacmd_features_reg);
3877		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3878		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3879		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3880
3881		break;
3882
3883	default:
3884		break;
3885	}
3886
3887	ASSERT(nvp->nvp_slot);
3888}
3889
3890
3891/*
3892 * start a command that involves no media access
3893 */
3894static int
3895nv_start_nodata(nv_port_t *nvp, int slot)
3896{
3897	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3898	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3899	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3900	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3901
3902	nv_program_taskfile_regs(nvp, slot);
3903
3904	/*
3905	 * This next one sets the controller in motion
3906	 */
3907	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3908
3909	return (SATA_TRAN_ACCEPTED);
3910}
3911
3912
3913int
3914nv_bm_status_clear(nv_port_t *nvp)
3915{
3916	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3917	uchar_t	status, ret;
3918
3919	/*
3920	 * Get the current BM status
3921	 */
3922	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3923
3924	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3925
3926	/*
3927	 * Clear the latches (and preserve the other bits)
3928	 */
3929	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3930
3931	return (ret);
3932}
3933
3934
3935/*
3936 * program the bus master DMA engine with the PRD address for
3937 * the active slot command, and start the DMA engine.
3938 */
3939static void
3940nv_start_dma_engine(nv_port_t *nvp, int slot)
3941{
3942	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3943	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3944	uchar_t direction;
3945
3946	ASSERT(nv_slotp->nvslot_spkt != NULL);
3947
3948	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3949	    == SATA_DIR_READ) {
3950		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3951	} else {
3952		direction = BMICX_RWCON_READ_FROM_MEMORY;
3953	}
3954
3955	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3956	    "nv_start_dma_engine entered"));
3957
3958	/*
3959	 * reset the controller's interrupt and error status bits
3960	 */
3961	(void) nv_bm_status_clear(nvp);
3962
3963	/*
3964	 * program the PRD table physical start address
3965	 */
3966	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3967
3968	/*
3969	 * set the direction control and start the DMA controller
3970	 */
3971	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3972}
3973
3974/*
3975 * start dma command, either in or out
3976 */
3977static int
3978nv_start_dma(nv_port_t *nvp, int slot)
3979{
3980	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3981	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3982	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3983	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3984	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3985#ifdef NCQ
3986	uint8_t ncq = B_FALSE;
3987#endif
3988	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3989	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3990	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3991	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3992
3993	ASSERT(sg_count != 0);
3994
3995	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3996		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3997		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3998		    sata_cmdp->satacmd_num_dma_cookies);
3999
4000		return (NV_FAILURE);
4001	}
4002
4003	nv_program_taskfile_regs(nvp, slot);
4004
4005	/*
4006	 * start the drive in motion
4007	 */
4008	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4009
4010	/*
4011	 * the drive starts processing the transaction when the cmd register
4012	 * is written.  This is done here before programming the DMA engine to
4013	 * parallelize and save some time.  In the event that the drive is ready
4014	 * before DMA, it will wait.
4015	 */
4016#ifdef NCQ
4017	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4018	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4019		ncq = B_TRUE;
4020	}
4021#endif
4022
4023	/*
4024	 * copy the PRD list to PRD table in DMA accessible memory
4025	 * so that the controller can access it.
4026	 */
4027	for (idx = 0; idx < sg_count; idx++, srcp++) {
4028		uint32_t size;
4029
4030		ASSERT(srcp->dmac_size <= UINT16_MAX);
4031
4032		nv_put32(sghdl, dstp++, srcp->dmac_address);
4033
4034		size = srcp->dmac_size;
4035
4036		/*
4037		 * If this is a 40-bit address, copy bits 32-40 of the
4038		 * physical address to bits 16-24 of the PRD count.
4039		 */
4040		if (srcp->dmac_laddress > UINT32_MAX) {
4041			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4042		}
4043
4044		/*
4045		 * set the end of table flag for the last entry
4046		 */
4047		if (idx == (sg_count - 1)) {
4048			size |= PRDE_EOT;
4049		}
4050
4051		nv_put32(sghdl, dstp++, size);
4052	}
4053
4054	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4055	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4056
4057	nv_start_dma_engine(nvp, slot);
4058
4059#ifdef NCQ
4060	/*
4061	 * optimization:  for SWNCQ, start DMA engine if this is the only
4062	 * command running.  Preliminary NCQ efforts indicated this needs
4063	 * more debugging.
4064	 *
4065	 * if (nvp->nvp_ncq_run <= 1)
4066	 */
4067
4068	if (ncq == B_FALSE) {
4069		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4070		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4071		    " cmd = %X", non_ncq_commands++, cmd));
4072		nv_start_dma_engine(nvp, slot);
4073	} else {
4074		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4075		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4076	}
4077#endif /* NCQ */
4078
4079	return (SATA_TRAN_ACCEPTED);
4080}
4081
4082
4083/*
4084 * start a PIO data-in ATA command
4085 */
4086static int
4087nv_start_pio_in(nv_port_t *nvp, int slot)
4088{
4089
4090	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4091	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4092	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4093
4094	nv_program_taskfile_regs(nvp, slot);
4095
4096	/*
4097	 * This next one sets the drive in motion
4098	 */
4099	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4100
4101	return (SATA_TRAN_ACCEPTED);
4102}
4103
4104
4105/*
4106 * start a PIO data-out ATA command
4107 */
4108static int
4109nv_start_pio_out(nv_port_t *nvp, int slot)
4110{
4111	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4112	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4113	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4114
4115	nv_program_taskfile_regs(nvp, slot);
4116
4117	/*
4118	 * this next one sets the drive in motion
4119	 */
4120	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4121
4122	/*
4123	 * wait for the busy bit to settle
4124	 */
4125	NV_DELAY_NSEC(400);
4126
4127	/*
4128	 * wait for the drive to assert DRQ to send the first chunk
4129	 * of data. Have to busy wait because there's no interrupt for
4130	 * the first chunk. This is bad... uses a lot of cycles if the
4131	 * drive responds too slowly or if the wait loop granularity
4132	 * is too large. It's even worse if the drive is defective and
4133	 * the loop times out.
4134	 */
4135	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4136	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4137	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4138	    4000000, 0) == B_FALSE) {
4139		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4140
4141		goto error;
4142	}
4143
4144	/*
4145	 * send the first block.
4146	 */
4147	nv_intr_pio_out(nvp, nv_slotp);
4148
4149	/*
4150	 * If nvslot_flags is not set to COMPLETE yet, then processing
4151	 * is OK so far, so return.  Otherwise, fall into error handling
4152	 * below.
4153	 */
4154	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4155
4156		return (SATA_TRAN_ACCEPTED);
4157	}
4158
4159	error:
4160	/*
4161	 * there was an error so reset the device and complete the packet.
4162	 */
4163	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4164	nv_complete_io(nvp, spkt, 0);
4165	nv_reset(nvp);
4166
4167	return (SATA_TRAN_PORT_ERROR);
4168}
4169
4170
4171/*
4172 * start a ATAPI Packet command (PIO data in or out)
4173 */
4174static int
4175nv_start_pkt_pio(nv_port_t *nvp, int slot)
4176{
4177	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4178	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4179	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4180	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4181
4182	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4183	    "nv_start_pkt_pio: start"));
4184
4185	/*
4186	 * Write the PACKET command to the command register.  Normally
4187	 * this would be done through nv_program_taskfile_regs().  It
4188	 * is done here because some values need to be overridden.
4189	 */
4190
4191	/* select the drive */
4192	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4193
4194	/* make certain the drive selected */
4195	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4196	    NV_SEC2USEC(5), 0) == B_FALSE) {
4197		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4198		    "nv_start_pkt_pio: drive select failed"));
4199		return (SATA_TRAN_PORT_ERROR);
4200	}
4201
4202	/*
4203	 * The command is always sent via PIO, despite whatever the SATA
4204	 * framework sets in the command.  Overwrite the DMA bit to do this.
4205	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4206	 */
4207	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4208
4209	/* set appropriately by the sata framework */
4210	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4211	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4212	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4213	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4214
4215	/* initiate the command by writing the command register last */
4216	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4217
4218	/* Give the host controller time to do its thing */
4219	NV_DELAY_NSEC(400);
4220
4221	/*
4222	 * Wait for the device to indicate that it is ready for the command
4223	 * ATAPI protocol state - HP0: Check_Status_A
4224	 */
4225
4226	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4227	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4228	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4229	    4000000, 0) == B_FALSE) {
4230		/*
4231		 * Either an error or device fault occurred or the wait
4232		 * timed out.  According to the ATAPI protocol, command
4233		 * completion is also possible.  Other implementations of
4234		 * this protocol don't handle this last case, so neither
4235		 * does this code.
4236		 */
4237
4238		if (nv_get8(cmdhdl, nvp->nvp_status) &
4239		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4240			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4241
4242			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4243			    "nv_start_pkt_pio: device error (HP0)"));
4244		} else {
4245			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4246
4247			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4248			    "nv_start_pkt_pio: timeout (HP0)"));
4249		}
4250
4251		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4252		nv_complete_io(nvp, spkt, 0);
4253		nv_reset(nvp);
4254
4255		return (SATA_TRAN_PORT_ERROR);
4256	}
4257
4258	/*
4259	 * Put the ATAPI command in the data register
4260	 * ATAPI protocol state - HP1: Send_Packet
4261	 */
4262
4263	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4264	    (ushort_t *)nvp->nvp_data,
4265	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4266
4267	/*
4268	 * See you in nv_intr_pkt_pio.
4269	 * ATAPI protocol state - HP3: INTRQ_wait
4270	 */
4271
4272	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4273	    "nv_start_pkt_pio: exiting into HP3"));
4274
4275	return (SATA_TRAN_ACCEPTED);
4276}
4277
4278
4279/*
4280 * Interrupt processing for a non-data ATA command.
4281 */
4282static void
4283nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4284{
4285	uchar_t status;
4286	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4287	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4288	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4289	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4290
4291	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4292
4293	status = nv_get8(cmdhdl, nvp->nvp_status);
4294
4295	/*
4296	 * check for errors
4297	 */
4298	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4299		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4300		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4301		    nvp->nvp_altstatus);
4302		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4303	} else {
4304		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4305	}
4306
4307	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4308}
4309
4310
4311/*
4312 * ATA command, PIO data in
4313 */
4314static void
4315nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4316{
4317	uchar_t	status;
4318	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4319	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4320	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4321	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4322	int count;
4323
4324	status = nv_get8(cmdhdl, nvp->nvp_status);
4325
4326	if (status & SATA_STATUS_BSY) {
4327		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4328		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4329		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4330		    nvp->nvp_altstatus);
4331		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4332		nv_reset(nvp);
4333
4334		return;
4335	}
4336
4337	/*
4338	 * check for errors
4339	 */
4340	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4341	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4342		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4343		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4344		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4345
4346		return;
4347	}
4348
4349	/*
4350	 * read the next chunk of data (if any)
4351	 */
4352	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4353
4354	/*
4355	 * read count bytes
4356	 */
4357	ASSERT(count != 0);
4358
4359	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4360	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4361
4362	nv_slotp->nvslot_v_addr += count;
4363	nv_slotp->nvslot_byte_count -= count;
4364
4365
4366	if (nv_slotp->nvslot_byte_count != 0) {
4367		/*
4368		 * more to transfer.  Wait for next interrupt.
4369		 */
4370		return;
4371	}
4372
4373	/*
4374	 * transfer is complete. wait for the busy bit to settle.
4375	 */
4376	NV_DELAY_NSEC(400);
4377
4378	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4379	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4380}
4381
4382
4383/*
4384 * ATA command PIO data out
4385 */
4386static void
4387nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4388{
4389	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4390	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4391	uchar_t status;
4392	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4393	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4394	int count;
4395
4396	/*
4397	 * clear the IRQ
4398	 */
4399	status = nv_get8(cmdhdl, nvp->nvp_status);
4400
4401	if (status & SATA_STATUS_BSY) {
4402		/*
4403		 * this should not happen
4404		 */
4405		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4406		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4407		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4408		    nvp->nvp_altstatus);
4409		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4410
4411		return;
4412	}
4413
4414	/*
4415	 * check for errors
4416	 */
4417	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4418		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4419		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4420		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4421
4422		return;
4423	}
4424
4425	/*
4426	 * this is the condition which signals the drive is
4427	 * no longer ready to transfer.  Likely that the transfer
4428	 * completed successfully, but check that byte_count is
4429	 * zero.
4430	 */
4431	if ((status & SATA_STATUS_DRQ) == 0) {
4432
4433		if (nv_slotp->nvslot_byte_count == 0) {
4434			/*
4435			 * complete; successful transfer
4436			 */
4437			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4438		} else {
4439			/*
4440			 * error condition, incomplete transfer
4441			 */
4442			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4443			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4444		}
4445		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4446
4447		return;
4448	}
4449
4450	/*
4451	 * write the next chunk of data
4452	 */
4453	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4454
4455	/*
4456	 * read or write count bytes
4457	 */
4458
4459	ASSERT(count != 0);
4460
4461	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4462	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4463
4464	nv_slotp->nvslot_v_addr += count;
4465	nv_slotp->nvslot_byte_count -= count;
4466}
4467
4468
4469/*
4470 * ATAPI PACKET command, PIO in/out interrupt
4471 *
4472 * Under normal circumstances, one of four different interrupt scenarios
4473 * will result in this function being called:
4474 *
4475 * 1. Packet command data transfer
4476 * 2. Packet command completion
4477 * 3. Request sense data transfer
4478 * 4. Request sense command completion
4479 */
4480static void
4481nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4482{
4483	uchar_t	status;
4484	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4485	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4486	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4487	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4488	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4489	uint16_t ctlr_count;
4490	int count;
4491
4492	/* ATAPI protocol state - HP2: Check_Status_B */
4493
4494	status = nv_get8(cmdhdl, nvp->nvp_status);
4495	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4496	    "nv_intr_pkt_pio: status 0x%x", status));
4497
4498	if (status & SATA_STATUS_BSY) {
4499		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4500			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4501			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4502		} else {
4503			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4504			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4505
4506			nv_reset(nvp);
4507		}
4508
4509		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4510		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4511
4512		return;
4513	}
4514
4515	if ((status & SATA_STATUS_DF) != 0) {
4516		/*
4517		 * On device fault, just clean up and bail.  Request sense
4518		 * will just default to its NO SENSE initialized value.
4519		 */
4520
4521		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4522			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4523		}
4524
4525		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4526		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4527
4528		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4529		    nvp->nvp_altstatus);
4530		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4531		    nvp->nvp_error);
4532
4533		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4534		    "nv_intr_pkt_pio: device fault"));
4535
4536		return;
4537	}
4538
4539	if ((status & SATA_STATUS_ERR) != 0) {
4540		/*
4541		 * On command error, figure out whether we are processing a
4542		 * request sense.  If so, clean up and bail.  Otherwise,
4543		 * do a REQUEST SENSE.
4544		 */
4545
4546		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4547			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4548			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4549			    NV_FAILURE) {
4550				nv_copy_registers(nvp, &spkt->satapkt_device,
4551				    spkt);
4552				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4553				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4554			}
4555
4556			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4557			    nvp->nvp_altstatus);
4558			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4559			    nvp->nvp_error);
4560		} else {
4561			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4562			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4563
4564			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4565		}
4566
4567		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4568		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4569
4570		return;
4571	}
4572
4573	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4574		/*
4575		 * REQUEST SENSE command processing
4576		 */
4577
4578		if ((status & (SATA_STATUS_DRQ)) != 0) {
4579			/* ATAPI state - HP4: Transfer_Data */
4580
4581			/* read the byte count from the controller */
4582			ctlr_count =
4583			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4584			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4585
4586			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4587			    "nv_intr_pkt_pio: ctlr byte count - %d",
4588			    ctlr_count));
4589
4590			if (ctlr_count == 0) {
4591				/* no data to transfer - some devices do this */
4592
4593				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4594				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4595
4596				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4597				    "nv_intr_pkt_pio: done (no data)"));
4598
4599				return;
4600			}
4601
4602			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4603
4604			/* transfer the data */
4605			ddi_rep_get16(cmdhdl,
4606			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4607			    (ushort_t *)nvp->nvp_data, (count >> 1),
4608			    DDI_DEV_NO_AUTOINCR);
4609
4610			/* consume residual bytes */
4611			ctlr_count -= count;
4612
4613			if (ctlr_count > 0) {
4614				for (; ctlr_count > 0; ctlr_count -= 2)
4615					(void) ddi_get16(cmdhdl,
4616					    (ushort_t *)nvp->nvp_data);
4617			}
4618
4619			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4620			    "nv_intr_pkt_pio: transition to HP2"));
4621		} else {
4622			/* still in ATAPI state - HP2 */
4623
4624			/*
4625			 * In order to avoid clobbering the rqsense data
4626			 * set by the SATA framework, the sense data read
4627			 * from the device is put in a separate buffer and
4628			 * copied into the packet after the request sense
4629			 * command successfully completes.
4630			 */
4631			bcopy(nv_slotp->nvslot_rqsense_buff,
4632			    spkt->satapkt_cmd.satacmd_rqsense,
4633			    SATA_ATAPI_RQSENSE_LEN);
4634
4635			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4636			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4637
4638			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4639			    "nv_intr_pkt_pio: request sense done"));
4640		}
4641
4642		return;
4643	}
4644
4645	/*
4646	 * Normal command processing
4647	 */
4648
4649	if ((status & (SATA_STATUS_DRQ)) != 0) {
4650		/* ATAPI protocol state - HP4: Transfer_Data */
4651
4652		/* read the byte count from the controller */
4653		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4654		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4655
4656		if (ctlr_count == 0) {
4657			/* no data to transfer - some devices do this */
4658
4659			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4660			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4661
4662			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4663			    "nv_intr_pkt_pio: done (no data)"));
4664
4665			return;
4666		}
4667
4668		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4669
4670		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4671		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4672
4673		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4674		    "nv_intr_pkt_pio: byte_count 0x%x",
4675		    nv_slotp->nvslot_byte_count));
4676
4677		/* transfer the data */
4678
4679		if (direction == SATA_DIR_READ) {
4680			ddi_rep_get16(cmdhdl,
4681			    (ushort_t *)nv_slotp->nvslot_v_addr,
4682			    (ushort_t *)nvp->nvp_data, (count >> 1),
4683			    DDI_DEV_NO_AUTOINCR);
4684
4685			ctlr_count -= count;
4686
4687			if (ctlr_count > 0) {
4688				/* consume remainding bytes */
4689
4690				for (; ctlr_count > 0;
4691				    ctlr_count -= 2)
4692					(void) ddi_get16(cmdhdl,
4693					    (ushort_t *)nvp->nvp_data);
4694
4695				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4696				    "nv_intr_pkt_pio: bytes remained"));
4697			}
4698		} else {
4699			ddi_rep_put16(cmdhdl,
4700			    (ushort_t *)nv_slotp->nvslot_v_addr,
4701			    (ushort_t *)nvp->nvp_data, (count >> 1),
4702			    DDI_DEV_NO_AUTOINCR);
4703		}
4704
4705		nv_slotp->nvslot_v_addr += count;
4706		nv_slotp->nvslot_byte_count -= count;
4707
4708		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4709		    "nv_intr_pkt_pio: transition to HP2"));
4710	} else {
4711		/* still in ATAPI state - HP2 */
4712
4713		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4714		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4715
4716		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4717		    "nv_intr_pkt_pio: done"));
4718	}
4719}
4720
4721
4722/*
4723 * ATA command, DMA data in/out
4724 */
4725static void
4726nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4727{
4728	uchar_t status;
4729	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4730	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4731	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4732	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4733	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4734	uchar_t	bmicx;
4735	uchar_t bm_status;
4736
4737	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4738
4739	/*
4740	 * stop DMA engine.
4741	 */
4742	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4743	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4744
4745	/*
4746	 * get the status and clear the IRQ, and check for DMA error
4747	 */
4748	status = nv_get8(cmdhdl, nvp->nvp_status);
4749
4750	/*
4751	 * check for drive errors
4752	 */
4753	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4754		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4755		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4756		(void) nv_bm_status_clear(nvp);
4757
4758		return;
4759	}
4760
4761	bm_status = nv_bm_status_clear(nvp);
4762
4763	/*
4764	 * check for bus master errors
4765	 */
4766	if (bm_status & BMISX_IDERR) {
4767		spkt->satapkt_reason = SATA_PKT_RESET;
4768		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4769		    nvp->nvp_altstatus);
4770		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4771		nv_reset(nvp);
4772
4773		return;
4774	}
4775
4776	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4777}
4778
4779
4780/*
4781 * Wait for a register of a controller to achieve a specific state.
4782 * To return normally, all the bits in the first sub-mask must be ON,
4783 * all the bits in the second sub-mask must be OFF.
4784 * If timeout_usec microseconds pass without the controller achieving
4785 * the desired bit configuration, return TRUE, else FALSE.
4786 *
4787 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4788 * occur for the first 250 us, then switch over to a sleeping wait.
4789 *
4790 */
4791int
4792nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4793    int type_wait)
4794{
4795	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4796	hrtime_t end, cur, start_sleep, start;
4797	int first_time = B_TRUE;
4798	ushort_t val;
4799
4800	for (;;) {
4801		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4802
4803		if ((val & onbits) == onbits && (val & offbits) == 0) {
4804
4805			return (B_TRUE);
4806		}
4807
4808		cur = gethrtime();
4809
4810		/*
4811		 * store the start time and calculate the end
4812		 * time.  also calculate "start_sleep" which is
4813		 * the point after which the driver will stop busy
4814		 * waiting and change to sleep waiting.
4815		 */
4816		if (first_time) {
4817			first_time = B_FALSE;
4818			/*
4819			 * start and end are in nanoseconds
4820			 */
4821			start = cur;
4822			end = start + timeout_usec * 1000;
4823			/*
4824			 * add 1 ms to start
4825			 */
4826			start_sleep =  start + 250000;
4827
4828			if (servicing_interrupt()) {
4829				type_wait = NV_NOSLEEP;
4830			}
4831		}
4832
4833		if (cur > end) {
4834
4835			break;
4836		}
4837
4838		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4839#if ! defined(__lock_lint)
4840			delay(1);
4841#endif
4842		} else {
4843			drv_usecwait(nv_usec_delay);
4844		}
4845	}
4846
4847	return (B_FALSE);
4848}
4849
4850
4851/*
4852 * This is a slightly more complicated version that checks
4853 * for error conditions and bails-out rather than looping
4854 * until the timeout is exceeded.
4855 *
4856 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4857 * occur for the first 250 us, then switch over to a sleeping wait.
4858 */
4859int
4860nv_wait3(
4861	nv_port_t	*nvp,
4862	uchar_t		onbits1,
4863	uchar_t		offbits1,
4864	uchar_t		failure_onbits2,
4865	uchar_t		failure_offbits2,
4866	uchar_t		failure_onbits3,
4867	uchar_t		failure_offbits3,
4868	uint_t		timeout_usec,
4869	int		type_wait)
4870{
4871	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4872	hrtime_t end, cur, start_sleep, start;
4873	int first_time = B_TRUE;
4874	ushort_t val;
4875
4876	for (;;) {
4877		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4878
4879		/*
4880		 * check for expected condition
4881		 */
4882		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4883
4884			return (B_TRUE);
4885		}
4886
4887		/*
4888		 * check for error conditions
4889		 */
4890		if ((val & failure_onbits2) == failure_onbits2 &&
4891		    (val & failure_offbits2) == 0) {
4892
4893			return (B_FALSE);
4894		}
4895
4896		if ((val & failure_onbits3) == failure_onbits3 &&
4897		    (val & failure_offbits3) == 0) {
4898
4899			return (B_FALSE);
4900		}
4901
4902		/*
4903		 * store the start time and calculate the end
4904		 * time.  also calculate "start_sleep" which is
4905		 * the point after which the driver will stop busy
4906		 * waiting and change to sleep waiting.
4907		 */
4908		if (first_time) {
4909			first_time = B_FALSE;
4910			/*
4911			 * start and end are in nanoseconds
4912			 */
4913			cur = start = gethrtime();
4914			end = start + timeout_usec * 1000;
4915			/*
4916			 * add 1 ms to start
4917			 */
4918			start_sleep =  start + 250000;
4919
4920			if (servicing_interrupt()) {
4921				type_wait = NV_NOSLEEP;
4922			}
4923		} else {
4924			cur = gethrtime();
4925		}
4926
4927		if (cur > end) {
4928
4929			break;
4930		}
4931
4932		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4933#if ! defined(__lock_lint)
4934			delay(1);
4935#endif
4936		} else {
4937			drv_usecwait(nv_usec_delay);
4938		}
4939	}
4940
4941	return (B_FALSE);
4942}
4943
4944
4945/*
4946 * nv_check_link() checks if a specified link is active device present
4947 * and communicating.
4948 */
4949static boolean_t
4950nv_check_link(uint32_t sstatus)
4951{
4952	uint8_t det;
4953
4954	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4955
4956	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4957}
4958
4959
4960/*
4961 * nv_port_state_change() reports the state of the port to the
4962 * sata module by calling sata_hba_event_notify().  This
4963 * function is called any time the state of the port is changed
4964 */
4965static void
4966nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4967{
4968	sata_device_t sd;
4969
4970	bzero((void *)&sd, sizeof (sata_device_t));
4971	sd.satadev_rev = SATA_DEVICE_REV;
4972	nv_copy_registers(nvp, &sd, NULL);
4973
4974	/*
4975	 * When NCQ is implemented sactive and snotific field need to be
4976	 * updated.
4977	 */
4978	sd.satadev_addr.cport = nvp->nvp_port_num;
4979	sd.satadev_addr.qual = addr_type;
4980	sd.satadev_state = state;
4981
4982	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4983}
4984
4985
4986/*
4987 * timeout processing:
4988 *
4989 * Check if any packets have crossed a timeout threshold.  If so, then
4990 * abort the packet.  This function is not NCQ aware.
4991 *
4992 * If reset was invoked in any other place than nv_sata_probe(), then
4993 * monitor for reset completion here.
4994 *
4995 */
4996static void
4997nv_timeout(void *arg)
4998{
4999	nv_port_t *nvp = arg;
5000	nv_slot_t *nv_slotp;
5001	int restart_timeout = B_FALSE;
5002
5003	mutex_enter(&nvp->nvp_mutex);
5004
5005	/*
5006	 * If the probe entry point is driving the reset and signature
5007	 * acquisition, just return.
5008	 */
5009	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5010		goto finished;
5011	}
5012
5013	/*
5014	 * If the port is not in the init state, it likely
5015	 * means the link was lost while a timeout was active.
5016	 */
5017	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5018		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5019		    "nv_timeout: port uninitialized"));
5020
5021		goto finished;
5022	}
5023
5024	if (nvp->nvp_state & NV_PORT_RESET) {
5025		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5026		uint32_t sstatus;
5027
5028		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5029		    "nv_timeout(): port waiting for signature"));
5030
5031		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5032
5033		/*
5034		 * check for link presence.  If the link remains
5035		 * missing for more than 2 seconds, send a remove
5036		 * event and abort signature acquisition.
5037		 */
5038		if (nv_check_link(sstatus) == B_FALSE) {
5039			clock_t e_link_lost = ddi_get_lbolt();
5040
5041			if (nvp->nvp_link_lost_time == 0) {
5042				nvp->nvp_link_lost_time = e_link_lost;
5043			}
5044			if (TICK_TO_SEC(e_link_lost -
5045			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5046				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5047				    "probe: intermittent link lost while"
5048				    " resetting"));
5049				restart_timeout = B_TRUE;
5050			} else {
5051				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5052				    "link lost during signature acquisition."
5053				    "  Giving up"));
5054				nv_port_state_change(nvp,
5055				    SATA_EVNT_DEVICE_DETACHED|
5056				    SATA_EVNT_LINK_LOST,
5057				    SATA_ADDR_CPORT, 0);
5058				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5059				nvp->nvp_state &= ~NV_PORT_RESET;
5060			}
5061
5062			goto finished;
5063		} else {
5064
5065			nvp->nvp_link_lost_time = 0;
5066		}
5067
5068		nv_read_signature(nvp);
5069
5070		if (nvp->nvp_signature != 0) {
5071			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5072			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5073				nvp->nvp_state |= NV_PORT_RESTORE;
5074				nv_port_state_change(nvp,
5075				    SATA_EVNT_DEVICE_RESET,
5076				    SATA_ADDR_DCPORT,
5077				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5078			}
5079
5080			goto finished;
5081		}
5082
5083		/*
5084		 * Reset if more than 5 seconds has passed without
5085		 * acquiring a signature.
5086		 */
5087		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5088			nv_reset(nvp);
5089		}
5090
5091		restart_timeout = B_TRUE;
5092		goto finished;
5093	}
5094
5095
5096	/*
5097	 * not yet NCQ aware
5098	 */
5099	nv_slotp = &(nvp->nvp_slot[0]);
5100
5101	/*
5102	 * this happens early on before nv_slotp is set
5103	 * up OR when a device was unexpectedly removed and
5104	 * there was an active packet.
5105	 */
5106	if (nv_slotp == NULL) {
5107		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5108		    "nv_timeout: nv_slotp == NULL"));
5109
5110		goto finished;
5111	}
5112
5113	/*
5114	 * perform timeout checking and processing only if there is an
5115	 * active packet on the port
5116	 */
5117	if (nv_slotp->nvslot_spkt != NULL)  {
5118		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5119		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5120		uint8_t cmd = satacmd->satacmd_cmd_reg;
5121		uint64_t lba;
5122
5123#if ! defined(__lock_lint) && defined(DEBUG)
5124
5125		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5126		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5127		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5128		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5129		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5130		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5131#endif
5132
5133		/*
5134		 * timeout not needed if there is a polling thread
5135		 */
5136		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5137
5138			goto finished;
5139		}
5140
5141		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5142		    spkt->satapkt_time) {
5143			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5144			    "abort timeout: "
5145			    "nvslot_stime: %ld max ticks till timeout: "
5146			    "%ld cur_time: %ld cmd=%x lba=%d",
5147			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5148			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5149
5150			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5151
5152		} else {
5153			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5154			    " still in use so restarting timeout"));
5155		}
5156		restart_timeout = B_TRUE;
5157
5158	} else {
5159		/*
5160		 * there was no active packet, so do not re-enable timeout
5161		 */
5162		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5163		    "nv_timeout: no active packet so not re-arming timeout"));
5164	}
5165
5166	finished:
5167
5168	if (restart_timeout == B_TRUE) {
5169		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5170		    drv_usectohz(NV_ONE_SEC));
5171	} else {
5172		nvp->nvp_timeout_id = 0;
5173	}
5174	mutex_exit(&nvp->nvp_mutex);
5175}
5176
5177
5178/*
5179 * enable or disable the 3 interrupt types the driver is
5180 * interested in: completion, add and remove.
5181 */
5182static void
5183mcp04_set_intr(nv_port_t *nvp, int flag)
5184{
5185	nv_ctl_t *nvc = nvp->nvp_ctlp;
5186	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5187	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5188	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
5189	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
5190	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
5191	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5192
5193	ASSERT(mutex_owned(&nvp->nvp_mutex));
5194
5195	/*
5196	 * controller level lock also required since access to an 8-bit
5197	 * interrupt register is shared between both channels.
5198	 */
5199	mutex_enter(&nvc->nvc_mutex);
5200
5201	if (flag & NV_INTR_CLEAR_ALL) {
5202		NVLOG((NVDBG_INTR, nvc, nvp,
5203		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
5204
5205		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5206		    (uint8_t *)(nvc->nvc_mcp04_int_status));
5207
5208		if (intr_status & clear_all_bits[port]) {
5209
5210			nv_put8(nvc->nvc_bar_hdl[5],
5211			    (uint8_t *)(nvc->nvc_mcp04_int_status),
5212			    clear_all_bits[port]);
5213
5214			NVLOG((NVDBG_INTR, nvc, nvp,
5215			    "interrupt bits cleared %x",
5216			    intr_status & clear_all_bits[port]));
5217		}
5218	}
5219
5220	if (flag & NV_INTR_DISABLE) {
5221		NVLOG((NVDBG_INTR, nvc, nvp,
5222		    "mcp04_set_intr: NV_INTR_DISABLE"));
5223		int_en = nv_get8(bar5_hdl,
5224		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5225		int_en &= ~intr_bits[port];
5226		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5227		    int_en);
5228	}
5229
5230	if (flag & NV_INTR_ENABLE) {
5231		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
5232		int_en = nv_get8(bar5_hdl,
5233		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5234		int_en |= intr_bits[port];
5235		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5236		    int_en);
5237	}
5238
5239	mutex_exit(&nvc->nvc_mutex);
5240}
5241
5242
5243/*
5244 * enable or disable the 3 interrupts the driver is interested in:
5245 * completion interrupt, hot add, and hot remove interrupt.
5246 */
5247static void
5248mcp55_set_intr(nv_port_t *nvp, int flag)
5249{
5250	nv_ctl_t *nvc = nvp->nvp_ctlp;
5251	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5252	uint16_t intr_bits =
5253	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
5254	uint16_t int_en;
5255
5256	ASSERT(mutex_owned(&nvp->nvp_mutex));
5257
5258	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5259
5260	if (flag & NV_INTR_CLEAR_ALL) {
5261		NVLOG((NVDBG_INTR, nvc, nvp,
5262		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
5263		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
5264	}
5265
5266	if (flag & NV_INTR_ENABLE) {
5267		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
5268		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5269		int_en |= intr_bits;
5270		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5271	}
5272
5273	if (flag & NV_INTR_DISABLE) {
5274		NVLOG((NVDBG_INTR, nvc, nvp,
5275		    "mcp55_set_intr: NV_INTR_DISABLE"));
5276		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5277		int_en &= ~intr_bits;
5278		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5279	}
5280}
5281
5282
5283/*
5284 * The PM functions for suspend and resume are incomplete and need additional
5285 * work.  It may or may not work in the current state.
5286 */
5287static void
5288nv_resume(nv_port_t *nvp)
5289{
5290	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5291
5292	mutex_enter(&nvp->nvp_mutex);
5293
5294	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5295		mutex_exit(&nvp->nvp_mutex);
5296
5297		return;
5298	}
5299
5300#ifdef SGPIO_SUPPORT
5301	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5302	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5303#endif
5304
5305	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5306
5307	/*
5308	 * power may have been removed to the port and the
5309	 * drive, and/or a drive may have been added or removed.
5310	 * Force a reset which will cause a probe and re-establish
5311	 * any state needed on the drive.
5312	 * nv_reset(nvp);
5313	 */
5314
5315	nv_reset(nvp);
5316
5317	mutex_exit(&nvp->nvp_mutex);
5318}
5319
5320/*
5321 * The PM functions for suspend and resume are incomplete and need additional
5322 * work.  It may or may not work in the current state.
5323 */
5324static void
5325nv_suspend(nv_port_t *nvp)
5326{
5327	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5328
5329	mutex_enter(&nvp->nvp_mutex);
5330
5331#ifdef SGPIO_SUPPORT
5332	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5333	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5334#endif
5335
5336	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5337		mutex_exit(&nvp->nvp_mutex);
5338
5339		return;
5340	}
5341
5342	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
5343
5344	/*
5345	 * power may have been removed to the port and the
5346	 * drive, and/or a drive may have been added or removed.
5347	 * Force a reset which will cause a probe and re-establish
5348	 * any state needed on the drive.
5349	 * nv_reset(nvp);
5350	 */
5351
5352	mutex_exit(&nvp->nvp_mutex);
5353}
5354
5355
5356static void
5357nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5358{
5359	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5360	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5361	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5362	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5363	uchar_t status;
5364	struct sata_cmd_flags flags;
5365
5366	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5367
5368	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5369	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5370	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5371
5372	if (spkt == NULL) {
5373
5374		return;
5375	}
5376
5377	/*
5378	 * in the error case, implicitly set the return of regs needed
5379	 * for error handling.
5380	 */
5381	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5382	    nvp->nvp_altstatus);
5383
5384	flags = scmd->satacmd_flags;
5385
5386	if (status & SATA_STATUS_ERR) {
5387		flags.sata_copy_out_lba_low_msb = B_TRUE;
5388		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5389		flags.sata_copy_out_lba_high_msb = B_TRUE;
5390		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5391		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5392		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5393		flags.sata_copy_out_error_reg = B_TRUE;
5394		flags.sata_copy_out_sec_count_msb = B_TRUE;
5395		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5396		scmd->satacmd_status_reg = status;
5397	}
5398
5399	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5400
5401		/*
5402		 * set HOB so that high byte will be read
5403		 */
5404		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5405
5406		/*
5407		 * get the requested high bytes
5408		 */
5409		if (flags.sata_copy_out_sec_count_msb) {
5410			scmd->satacmd_sec_count_msb =
5411			    nv_get8(cmdhdl, nvp->nvp_count);
5412		}
5413
5414		if (flags.sata_copy_out_lba_low_msb) {
5415			scmd->satacmd_lba_low_msb =
5416			    nv_get8(cmdhdl, nvp->nvp_sect);
5417		}
5418
5419		if (flags.sata_copy_out_lba_mid_msb) {
5420			scmd->satacmd_lba_mid_msb =
5421			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5422		}
5423
5424		if (flags.sata_copy_out_lba_high_msb) {
5425			scmd->satacmd_lba_high_msb =
5426			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5427		}
5428	}
5429
5430	/*
5431	 * disable HOB so that low byte is read
5432	 */
5433	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5434
5435	/*
5436	 * get the requested low bytes
5437	 */
5438	if (flags.sata_copy_out_sec_count_lsb) {
5439		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5440	}
5441
5442	if (flags.sata_copy_out_lba_low_lsb) {
5443		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5444	}
5445
5446	if (flags.sata_copy_out_lba_mid_lsb) {
5447		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5448	}
5449
5450	if (flags.sata_copy_out_lba_high_lsb) {
5451		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5452	}
5453
5454	/*
5455	 * get the device register if requested
5456	 */
5457	if (flags.sata_copy_out_device_reg) {
5458		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5459	}
5460
5461	/*
5462	 * get the error register if requested
5463	 */
5464	if (flags.sata_copy_out_error_reg) {
5465		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5466	}
5467}
5468
5469
5470/*
5471 * Hot plug and remove interrupts can occur when the device is reset.  Just
5472 * masking the interrupt doesn't always work well because if a
5473 * different interrupt arrives on the other port, the driver can still
5474 * end up checking the state of the other port and discover the hot
5475 * interrupt flag is set even though it was masked.  Checking for recent
5476 * reset activity and then ignoring turns out to be the easiest way.
5477 */
5478static void
5479nv_report_add_remove(nv_port_t *nvp, int flags)
5480{
5481	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5482	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5483	uint32_t sstatus;
5484	int i;
5485
5486	/*
5487	 * If reset within last 1 second ignore.  This should be
5488	 * reworked and improved instead of having this somewhat
5489	 * heavy handed clamping job.
5490	 */
5491	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
5492		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
5493		    "ignoring plug interrupt was %dms ago",
5494		    TICK_TO_MSEC(time_diff)));
5495
5496		return;
5497	}
5498
5499	/*
5500	 * wait up to 1ms for sstatus to settle and reflect the true
5501	 * status of the port.  Failure to do so can create confusion
5502	 * in probe, where the incorrect sstatus value can still
5503	 * persist.
5504	 */
5505	for (i = 0; i < 1000; i++) {
5506		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5507
5508		if ((flags == NV_PORT_HOTREMOVED) &&
5509		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5510		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5511			break;
5512		}
5513
5514		if ((flags != NV_PORT_HOTREMOVED) &&
5515		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5516		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5517			break;
5518		}
5519		drv_usecwait(1);
5520	}
5521
5522	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5523	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5524
5525	if (flags == NV_PORT_HOTREMOVED) {
5526		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5527		    "nv_report_add_remove() hot removed"));
5528		nv_port_state_change(nvp,
5529		    SATA_EVNT_DEVICE_DETACHED,
5530		    SATA_ADDR_CPORT, 0);
5531
5532		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5533	} else {
5534		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5535		    "nv_report_add_remove() hot plugged"));
5536		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5537		    SATA_ADDR_CPORT, 0);
5538	}
5539}
5540
5541
5542/*
5543 * Get request sense data and stuff it the command's sense buffer.
5544 * Start a request sense command in order to get sense data to insert
5545 * in the sata packet's rqsense buffer.  The command completion
5546 * processing is in nv_intr_pkt_pio.
5547 *
5548 * The sata framework provides a function to allocate and set-up a
5549 * request sense packet command. The reasons it is not being used here is:
5550 * a) it cannot be called in an interrupt context and this function is
5551 *    called in an interrupt context.
5552 * b) it allocates DMA resources that are not used here because this is
5553 *    implemented using PIO.
5554 *
5555 * If, in the future, this is changed to use DMA, the sata framework should
5556 * be used to allocate and set-up the error retrieval (request sense)
5557 * command.
5558 */
5559static int
5560nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5561{
5562	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5563	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5564	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5565	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5566
5567	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5568	    "nv_start_rqsense_pio: start"));
5569
5570	/* clear the local request sense buffer before starting the command */
5571	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5572
5573	/* Write the request sense PACKET command */
5574
5575	/* select the drive */
5576	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5577
5578	/* make certain the drive selected */
5579	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5580	    NV_SEC2USEC(5), 0) == B_FALSE) {
5581		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5582		    "nv_start_rqsense_pio: drive select failed"));
5583		return (NV_FAILURE);
5584	}
5585
5586	/* set up the command */
5587	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
5588	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5589	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5590	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5591	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5592
5593	/* initiate the command by writing the command register last */
5594	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5595
5596	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5597	NV_DELAY_NSEC(400);
5598
5599	/*
5600	 * Wait for the device to indicate that it is ready for the command
5601	 * ATAPI protocol state - HP0: Check_Status_A
5602	 */
5603
5604	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5605	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5606	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5607	    4000000, 0) == B_FALSE) {
5608		if (nv_get8(cmdhdl, nvp->nvp_status) &
5609		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5610			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5611			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5612		} else {
5613			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5614			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5615		}
5616
5617		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5618		nv_complete_io(nvp, spkt, 0);
5619		nv_reset(nvp);
5620
5621		return (NV_FAILURE);
5622	}
5623
5624	/*
5625	 * Put the ATAPI command in the data register
5626	 * ATAPI protocol state - HP1: Send_Packet
5627	 */
5628
5629	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5630	    (ushort_t *)nvp->nvp_data,
5631	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5632
5633	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5634	    "nv_start_rqsense_pio: exiting into HP3"));
5635
5636	return (NV_SUCCESS);
5637}
5638
5639
5640#ifdef SGPIO_SUPPORT
5641/*
5642 * NVIDIA specific SGPIO LED support
5643 * Please refer to the NVIDIA documentation for additional details
5644 */
5645
5646/*
5647 * nv_sgp_led_init
5648 * Detect SGPIO support.  If present, initialize.
5649 */
5650static void
5651nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5652{
5653	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5654	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5655	nv_sgp_cmn_t *cmn;	/* shared data structure */
5656	char tqname[SGPIO_TQ_NAME_LEN];
5657	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5658
5659	/*
5660	 * Initialize with appropriately invalid values in case this function
5661	 * exits without initializing SGPIO (for example, there is no SGPIO
5662	 * support).
5663	 */
5664	nvc->nvc_sgp_csr = 0;
5665	nvc->nvc_sgp_cbp = NULL;
5666
5667	/*
5668	 * CK804 can pass the sgpio_detect test even though it does not support
5669	 * SGPIO, so don't even look at a CK804.
5670	 */
5671	if (nvc->nvc_mcp55_flag != B_TRUE)
5672		return;
5673
5674	/*
5675	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5676	 * However, the current implementation only supports 4 drives.
5677	 * With two drives per controller, that means only look at the
5678	 * first two controllers.
5679	 */
5680	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5681		return;
5682
5683	/* confirm that the SGPIO registers are there */
5684	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5685		NVLOG((NVDBG_INIT, nvc, NULL,
5686		    "SGPIO registers not detected"));
5687		return;
5688	}
5689
5690	/* save off the SGPIO_CSR I/O address */
5691	nvc->nvc_sgp_csr = csrp;
5692
5693	/* map in Command Block */
5694	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5695	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5696
5697	/* initialize the SGPIO h/w */
5698	if (nv_sgp_init(nvc) == NV_FAILURE) {
5699		nv_cmn_err(CE_WARN, nvc, NULL,
5700		    "!Unable to initialize SGPIO");
5701	}
5702
5703	if (nvc->nvc_ctlr_num == 0) {
5704		/*
5705		 * Controller 0 on the MCP55/IO55 initialized the SGPIO
5706		 * and the data that is shared between the controllers.
5707		 * The clever thing to do would be to let the first controller
5708		 * that comes up be the one that initializes all this.
5709		 * However, SGPIO state is not necessarily zeroed between
5710		 * between OS reboots, so there might be old data there.
5711		 */
5712
5713		/* allocate shared space */
5714		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5715		    KM_SLEEP);
5716		if (cmn == NULL) {
5717			nv_cmn_err(CE_WARN, nvc, NULL,
5718			    "!Failed to allocate shared data");
5719			return;
5720		}
5721
5722		nvc->nvc_sgp_cmn = cmn;
5723
5724		/* initialize the shared data structure */
5725		cmn->nvs_magic = SGPIO_MAGIC;
5726		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5727		cmn->nvs_connected = 0;
5728		cmn->nvs_activity = 0;
5729
5730		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5731		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5732		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5733
5734		/* put the address in the SGPIO scratch register */
5735#if defined(__amd64)
5736		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5737#else
5738		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5739#endif
5740
5741		/* start the activity LED taskq */
5742
5743		/*
5744		 * The taskq name should be unique and the time
5745		 */
5746		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5747		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5748		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5749		    TASKQ_DEFAULTPRI, 0);
5750		if (cmn->nvs_taskq == NULL) {
5751			cmn->nvs_taskq_delay = 0;
5752			nv_cmn_err(CE_WARN, nvc, NULL,
5753			    "!Failed to start activity LED taskq");
5754		} else {
5755			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5756			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5757			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5758		}
5759
5760	} else if (nvc->nvc_ctlr_num == 1) {
5761		/*
5762		 * Controller 1 confirms that SGPIO has been initialized
5763		 * and, if so, try to get the shared data pointer, otherwise
5764		 * get the shared data pointer when accessing the data.
5765		 */
5766
5767		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5768			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5769
5770			/*
5771			 * It looks like a pointer, but is it the shared data?
5772			 */
5773			if (cmn->nvs_magic == SGPIO_MAGIC) {
5774				nvc->nvc_sgp_cmn = cmn;
5775
5776				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5777			}
5778		}
5779	}
5780}
5781
5782/*
5783 * nv_sgp_detect
5784 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5785 * report back whether both were readable.
5786 */
5787static int
5788nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5789    uint32_t *cbpp)
5790{
5791	/* get the SGPIO_CSRP */
5792	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5793	if (*csrpp == 0) {
5794		return (NV_FAILURE);
5795	}
5796
5797	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5798	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5799	if (*cbpp == 0) {
5800		return (NV_FAILURE);
5801	}
5802
5803	/* SGPIO_CBP is good, so we must support SGPIO */
5804	return (NV_SUCCESS);
5805}
5806
5807/*
5808 * nv_sgp_init
5809 * Initialize SGPIO.  The process is specified by NVIDIA.
5810 */
5811static int
5812nv_sgp_init(nv_ctl_t *nvc)
5813{
5814	uint32_t status;
5815	int drive_count;
5816
5817	/*
5818	 * if SGPIO status set to SGPIO_STATE_RESET, logic has been
5819	 * reset and needs to be initialized.
5820	 */
5821	status = nv_sgp_csr_read(nvc);
5822	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5823		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5824			/* reset and try again */
5825			nv_sgp_reset(nvc);
5826			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5827				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5828				    "SGPIO init failed"));
5829				return (NV_FAILURE);
5830			}
5831		}
5832	}
5833
5834	/*
5835	 * NVIDIA recommends reading the supported drive count even
5836	 * though they also indicate that it is 4 at this time.
5837	 */
5838	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5839	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5840		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5841		    "SGPIO reported undocumented drive count - %d",
5842		    drive_count));
5843	}
5844
5845	NVLOG((NVDBG_INIT, nvc, NULL,
5846	    "initialized ctlr: %d csr: 0x%08x",
5847	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5848
5849	return (NV_SUCCESS);
5850}
5851
5852static void
5853nv_sgp_reset(nv_ctl_t *nvc)
5854{
5855	uint32_t cmd;
5856	uint32_t status;
5857
5858	cmd = SGPIO_CMD_RESET;
5859	nv_sgp_csr_write(nvc, cmd);
5860
5861	status = nv_sgp_csr_read(nvc);
5862
5863	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5864		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5865		    "SGPIO reset failed: CSR - 0x%x", status));
5866	}
5867}
5868
5869static int
5870nv_sgp_init_cmd(nv_ctl_t *nvc)
5871{
5872	int seq;
5873	hrtime_t start, end;
5874	uint32_t status;
5875	uint32_t cmd;
5876
5877	/* get the old sequence value */
5878	status = nv_sgp_csr_read(nvc);
5879	seq = SGPIO_CSR_SEQ(status);
5880
5881	/* check the state since we have the info anyway */
5882	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5883		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5884		    "SGPIO init_cmd: state not operational"));
5885	}
5886
5887	/* issue command */
5888	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5889	nv_sgp_csr_write(nvc, cmd);
5890
5891	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5892
5893	/* poll for completion */
5894	start = gethrtime();
5895	end = start + NV_SGP_CMD_TIMEOUT;
5896	for (;;) {
5897		status = nv_sgp_csr_read(nvc);
5898
5899		/* break on error */
5900		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5901			break;
5902
5903		/* break on command completion (seq changed) */
5904		if (SGPIO_CSR_SEQ(status) != seq) {
5905			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
5906				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5907				    "Seq changed but command still active"));
5908			}
5909
5910			break;
5911		}
5912
5913		/* Wait 400 ns and try again */
5914		NV_DELAY_NSEC(400);
5915
5916		if (gethrtime() > end)
5917			break;
5918	}
5919
5920	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
5921		return (NV_SUCCESS);
5922
5923	return (NV_FAILURE);
5924}
5925
5926static int
5927nv_sgp_check_set_cmn(nv_ctl_t *nvc)
5928{
5929	nv_sgp_cmn_t *cmn;
5930
5931	if (nvc->nvc_sgp_cbp == NULL)
5932		return (NV_FAILURE);
5933
5934	/* check to see if Scratch Register is set */
5935	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5936		nvc->nvc_sgp_cmn =
5937		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5938
5939		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
5940			return (NV_FAILURE);
5941
5942		cmn = nvc->nvc_sgp_cmn;
5943
5944		mutex_enter(&cmn->nvs_slock);
5945		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5946		mutex_exit(&cmn->nvs_slock);
5947
5948		return (NV_SUCCESS);
5949	}
5950
5951	return (NV_FAILURE);
5952}
5953
5954/*
5955 * nv_sgp_csr_read
5956 * This is just a 32-bit port read from the value that was obtained from the
5957 * PCI config space.
5958 *
5959 * XXX It was advised to use the in[bwl] function for this, even though they
5960 * are obsolete interfaces.
5961 */
5962static int
5963nv_sgp_csr_read(nv_ctl_t *nvc)
5964{
5965	return (inl(nvc->nvc_sgp_csr));
5966}
5967
5968/*
5969 * nv_sgp_csr_write
5970 * This is just a 32-bit I/O port write.  The port number was obtained from
5971 * the PCI config space.
5972 *
5973 * XXX It was advised to use the out[bwl] function for this, even though they
5974 * are obsolete interfaces.
5975 */
5976static void
5977nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
5978{
5979	outl(nvc->nvc_sgp_csr, val);
5980}
5981
5982/*
5983 * nv_sgp_write_data
5984 * Cause SGPIO to send Command Block data
5985 */
5986static int
5987nv_sgp_write_data(nv_ctl_t *nvc)
5988{
5989	hrtime_t start, end;
5990	uint32_t status;
5991	uint32_t cmd;
5992
5993	/* issue command */
5994	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
5995	nv_sgp_csr_write(nvc, cmd);
5996
5997	/* poll for completion */
5998	start = gethrtime();
5999	end = start + NV_SGP_CMD_TIMEOUT;
6000	for (;;) {
6001		status = nv_sgp_csr_read(nvc);
6002
6003		/* break on error completion */
6004		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6005			break;
6006
6007		/* break on successful completion */
6008		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6009			break;
6010
6011		/* Wait 400 ns and try again */
6012		NV_DELAY_NSEC(400);
6013
6014		if (gethrtime() > end)
6015			break;
6016	}
6017
6018	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6019		return (NV_SUCCESS);
6020
6021	return (NV_FAILURE);
6022}
6023
6024/*
6025 * nv_sgp_activity_led_ctl
6026 * This is run as a taskq.  It wakes up at a fixed interval and checks to
6027 * see if any of the activity LEDs need to be changed.
6028 */
6029static void
6030nv_sgp_activity_led_ctl(void *arg)
6031{
6032	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6033	nv_sgp_cmn_t *cmn;
6034	volatile nv_sgp_cb_t *cbp;
6035	clock_t ticks;
6036	uint8_t drv_leds;
6037	uint32_t old_leds;
6038	uint32_t new_led_state;
6039	int i;
6040
6041	cmn = nvc->nvc_sgp_cmn;
6042	cbp = nvc->nvc_sgp_cbp;
6043
6044	do {
6045		/* save off the old state of all of the LEDs */
6046		old_leds = cbp->sgpio0_tr;
6047
6048		DTRACE_PROBE3(sgpio__activity__state,
6049		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6050		    int, old_leds);
6051
6052		new_led_state = 0;
6053
6054		/* for each drive */
6055		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6056
6057			/* get the current state of the LEDs for the drive */
6058			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6059
6060			if ((cmn->nvs_connected & (1 << i)) == 0) {
6061				/* if not connected, turn off activity */
6062				drv_leds &= ~TR_ACTIVE_MASK;
6063				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6064
6065				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6066				new_led_state |=
6067				    SGPIO0_TR_DRV_SET(drv_leds, i);
6068
6069				continue;
6070			}
6071
6072			if ((cmn->nvs_activity & (1 << i)) == 0) {
6073				/* connected, but not active */
6074				drv_leds &= ~TR_ACTIVE_MASK;
6075				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6076
6077				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6078				new_led_state |=
6079				    SGPIO0_TR_DRV_SET(drv_leds, i);
6080
6081				continue;
6082			}
6083
6084			/* connected and active */
6085			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6086				/* was enabled, so disable */
6087				drv_leds &= ~TR_ACTIVE_MASK;
6088				drv_leds |=
6089				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6090
6091				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6092				new_led_state |=
6093				    SGPIO0_TR_DRV_SET(drv_leds, i);
6094			} else {
6095				/* was disabled, so enable */
6096				drv_leds &= ~TR_ACTIVE_MASK;
6097				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6098
6099				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6100				new_led_state |=
6101				    SGPIO0_TR_DRV_SET(drv_leds, i);
6102			}
6103
6104			/*
6105			 * clear the activity bit
6106			 * if there is drive activity again within the
6107			 * loop interval (now 1/16 second), nvs_activity
6108			 * will be reset and the "connected and active"
6109			 * condition above will cause the LED to blink
6110			 * off and on at the loop interval rate.  The
6111			 * rate may be increased (interval shortened) as
6112			 * long as it is not more than 1/30 second.
6113			 */
6114			mutex_enter(&cmn->nvs_slock);
6115			cmn->nvs_activity &= ~(1 << i);
6116			mutex_exit(&cmn->nvs_slock);
6117		}
6118
6119		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6120
6121		/* write out LED values */
6122
6123		mutex_enter(&cmn->nvs_slock);
6124		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6125		cbp->sgpio0_tr |= new_led_state;
6126		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6127		mutex_exit(&cmn->nvs_slock);
6128
6129		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6130			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6131			    "nv_sgp_write_data failure updating active LED"));
6132		}
6133
6134		/* now rest for the interval */
6135		mutex_enter(&cmn->nvs_tlock);
6136		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6137		if (ticks > 0)
6138			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6139			    ddi_get_lbolt() + ticks);
6140		mutex_exit(&cmn->nvs_tlock);
6141	} while (ticks > 0);
6142}
6143
6144/*
6145 * nv_sgp_drive_connect
6146 * Set the flag used to indicate that the drive is attached to the HBA.
6147 * Used to let the taskq know that it should turn the Activity LED on.
6148 */
6149static void
6150nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6151{
6152	nv_sgp_cmn_t *cmn;
6153
6154	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6155		return;
6156	cmn = nvc->nvc_sgp_cmn;
6157
6158	mutex_enter(&cmn->nvs_slock);
6159	cmn->nvs_connected |= (1 << drive);
6160	mutex_exit(&cmn->nvs_slock);
6161}
6162
6163/*
6164 * nv_sgp_drive_disconnect
6165 * Clears the flag used to indicate that the drive is no longer attached
6166 * to the HBA.  Used to let the taskq know that it should turn the
6167 * Activity LED off.  The flag that indicates that the drive is in use is
6168 * also cleared.
6169 */
6170static void
6171nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6172{
6173	nv_sgp_cmn_t *cmn;
6174
6175	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6176		return;
6177	cmn = nvc->nvc_sgp_cmn;
6178
6179	mutex_enter(&cmn->nvs_slock);
6180	cmn->nvs_connected &= ~(1 << drive);
6181	cmn->nvs_activity &= ~(1 << drive);
6182	mutex_exit(&cmn->nvs_slock);
6183}
6184
6185/*
6186 * nv_sgp_drive_active
6187 * Sets the flag used to indicate that the drive has been accessed and the
6188 * LED should be flicked off, then on.  It is cleared at a fixed time
6189 * interval by the LED taskq and set by the sata command start.
6190 */
6191static void
6192nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6193{
6194	nv_sgp_cmn_t *cmn;
6195
6196	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6197		return;
6198	cmn = nvc->nvc_sgp_cmn;
6199
6200	DTRACE_PROBE1(sgpio__active, int, drive);
6201
6202	mutex_enter(&cmn->nvs_slock);
6203	cmn->nvs_connected |= (1 << drive);
6204	cmn->nvs_activity |= (1 << drive);
6205	mutex_exit(&cmn->nvs_slock);
6206}
6207
6208
6209/*
6210 * nv_sgp_locate
6211 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6212 * maintained in the SGPIO Command Block.
6213 */
6214static void
6215nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6216{
6217	uint8_t leds;
6218	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6219	nv_sgp_cmn_t *cmn;
6220
6221	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6222		return;
6223	cmn = nvc->nvc_sgp_cmn;
6224
6225	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6226		return;
6227
6228	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6229
6230	mutex_enter(&cmn->nvs_slock);
6231
6232	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6233
6234	leds &= ~TR_LOCATE_MASK;
6235	leds |= TR_LOCATE_SET(value);
6236
6237	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6238	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6239
6240	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6241
6242	mutex_exit(&cmn->nvs_slock);
6243
6244	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6245		nv_cmn_err(CE_WARN, nvc, NULL,
6246		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6247	}
6248}
6249
6250/*
6251 * nv_sgp_error
6252 * Turns the Error/Failure LED off or on for a particular drive.  State is
6253 * maintained in the SGPIO Command Block.
6254 */
6255static void
6256nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6257{
6258	uint8_t leds;
6259	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6260	nv_sgp_cmn_t *cmn;
6261
6262	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6263		return;
6264	cmn = nvc->nvc_sgp_cmn;
6265
6266	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6267		return;
6268
6269	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6270
6271	mutex_enter(&cmn->nvs_slock);
6272
6273	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6274
6275	leds &= ~TR_ERROR_MASK;
6276	leds |= TR_ERROR_SET(value);
6277
6278	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6279	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6280
6281	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6282
6283	mutex_exit(&cmn->nvs_slock);
6284
6285	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6286		nv_cmn_err(CE_WARN, nvc, NULL,
6287		    "!nv_sgp_write_data failure updating Fail/Error LED");
6288	}
6289}
6290
6291static void
6292nv_sgp_cleanup(nv_ctl_t *nvc)
6293{
6294	int drive;
6295	uint8_t drv_leds;
6296	uint32_t led_state;
6297	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6298	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6299	extern void psm_unmap_phys(caddr_t, size_t);
6300
6301	/*
6302	 * If the SGPIO command block isn't mapped or the shared data
6303	 * structure isn't present in this instance, there isn't much that
6304	 * can be cleaned up.
6305	 */
6306	if ((cb == NULL) || (cmn == NULL))
6307		return;
6308
6309	/* turn off activity LEDs for this controller */
6310	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6311
6312	/* get the existing LED state */
6313	led_state = cb->sgpio0_tr;
6314
6315	/* turn off port 0 */
6316	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6317	led_state &= SGPIO0_TR_DRV_CLR(drive);
6318	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6319
6320	/* turn off port 1 */
6321	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6322	led_state &= SGPIO0_TR_DRV_CLR(drive);
6323	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6324
6325	/* set the new led state, which should turn off this ctrl's LEDs */
6326	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6327	(void) nv_sgp_write_data(nvc);
6328
6329	/* clear the controller's in use bit */
6330	mutex_enter(&cmn->nvs_slock);
6331	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6332	mutex_exit(&cmn->nvs_slock);
6333
6334	if (cmn->nvs_in_use == 0) {
6335		/* if all "in use" bits cleared, take everything down */
6336
6337		if (cmn->nvs_taskq != NULL) {
6338			/* allow activity taskq to exit */
6339			cmn->nvs_taskq_delay = 0;
6340			cv_broadcast(&cmn->nvs_cv);
6341
6342			/* then destroy it */
6343			ddi_taskq_destroy(cmn->nvs_taskq);
6344		}
6345
6346		/* turn off all of the LEDs */
6347		cb->sgpio0_tr = 0;
6348		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6349		(void) nv_sgp_write_data(nvc);
6350
6351		cb->sgpio_sr = NULL;
6352
6353		/* free resources */
6354		cv_destroy(&cmn->nvs_cv);
6355		mutex_destroy(&cmn->nvs_tlock);
6356		mutex_destroy(&cmn->nvs_slock);
6357
6358		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6359	}
6360
6361	nvc->nvc_sgp_cmn = NULL;
6362
6363	/* unmap the SGPIO Command Block */
6364	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6365}
6366#endif	/* SGPIO_SUPPORT */
6367