nv_sata.c revision 7824:c3bde0414354
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
30 *
31 * NCQ
32 * ---
33 *
34 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
35 * disabled and is likely to be revisited in the future.
36 *
37 *
38 * Power Management
39 * ----------------
40 *
41 * Normally power management would be responsible for ensuring the device
42 * is quiescent and then changing power states to the device, such as
43 * powering down parts or all of the device.  mcp55/ck804 is unique in
44 * that it is only available as part of a larger southbridge chipset, so
45 * removing power to the device isn't possible.  Switches to control
46 * power management states D0/D3 in the PCI configuration space appear to
47 * be supported but changes to these states are apparently ignored.
48 * The only further PM that the driver _could_ do is shut down the PHY,
49 * but in order to deliver the first rev of the driver sooner rather than
50 * later, that will be deferred until some future phase.
51 *
52 * Since the driver currently will not directly change any power state to
53 * the device, no power() entry point will be required.  However, it is
54 * possible that in ACPI power state S3, aka suspend to RAM, power can
55 * be removed from the device, and the driver cannot rely on the BIOS to
56 * have reset any state.  For the time being, there are no known
57 * non-default configurations that need to be programmed.  This judgement
58 * is based on the port of the legacy ata driver not having any such
59 * functionality and based on conversations with the PM team.  If such a
60 * restoration is later deemed necessary it can be incorporated into the
61 * DDI_RESUME processing.
62 *
63 */
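/*
 * Note: the DDI_RESUME case in nv_attach() below already restores the PCI
 * command register and the BAR5 enable bit in NV_SATA_CFG_20, and then
 * re-enables each port via nv_resume().
 */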
64
65#include <sys/scsi/scsi.h>
66#include <sys/pci.h>
67#include <sys/byteorder.h>
68#include <sys/sunddi.h>
69#include <sys/sata/sata_hba.h>
70#ifdef SGPIO_SUPPORT
71#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72#include <sys/devctl.h>
73#include <sys/sdt.h>
74#endif
75#include <sys/sata/adapters/nv_sata/nv_sata.h>
76#include <sys/disp.h>
77#include <sys/note.h>
78#include <sys/promif.h>
79
80
81/*
82 * Function prototypes for driver entry points
83 */
84static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86static int nv_quiesce(dev_info_t *dip);
87static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88    void *arg, void **result);
89
90/*
91 * Function prototypes for entry points from sata service module
92 * These functions are distinguished from other local functions
93 * by the prefix "nv_sata_"
94 */
95static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100
101/*
102 * Local function prototypes
103 */
104static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
105static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
106static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107#ifdef NV_MSI_SUPPORTED
108static int nv_add_msi_intrs(nv_ctl_t *nvc);
109#endif
110static void nv_rem_intrs(nv_ctl_t *nvc);
111static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112static int nv_start_nodata(nv_port_t *nvp, int slot);
113static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114static int nv_start_pio_in(nv_port_t *nvp, int slot);
115static int nv_start_pio_out(nv_port_t *nvp, int slot);
116static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120static int nv_start_dma(nv_port_t *nvp, int slot);
121static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
123static void nv_uninit_ctl(nv_ctl_t *nvc);
124static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void nv_uninit_port(nv_port_t *nvp);
127static int nv_init_port(nv_port_t *nvp);
128static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
129static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
130#ifdef NCQ
131static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
132#endif
133static void nv_start_dma_engine(nv_port_t *nvp, int slot);
134static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
135    int state);
136static boolean_t nv_check_link(uint32_t sstatus);
137static void nv_common_reg_init(nv_ctl_t *nvc);
138static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
139static void nv_reset(nv_port_t *nvp);
140static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
141static void nv_timeout(void *);
142static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
143static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
144static void nv_read_signature(nv_port_t *nvp);
145static void mcp55_set_intr(nv_port_t *nvp, int flag);
146static void mcp04_set_intr(nv_port_t *nvp, int flag);
147static void nv_resume(nv_port_t *nvp);
148static void nv_suspend(nv_port_t *nvp);
149static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
150static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
151static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
152    sata_pkt_t *spkt);
153static void nv_report_add_remove(nv_port_t *nvp, int flags);
154static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
155static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
156    uchar_t failure_onbits2, uchar_t failure_offbits2,
157    uchar_t failure_onbits3, uchar_t failure_offbits3,
158    uint_t timeout_usec, int type_wait);
159static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
160    uint_t timeout_usec, int type_wait);
161static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
162
163#ifdef SGPIO_SUPPORT
164static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
165static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
166static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
167    cred_t *credp, int *rvalp);
168
169static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
170static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
171    uint32_t *cbpp);
172static int nv_sgp_init(nv_ctl_t *nvc);
173static void nv_sgp_reset(nv_ctl_t *nvc);
174static int nv_sgp_init_cmd(nv_ctl_t *nvc);
175static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
176static int nv_sgp_csr_read(nv_ctl_t *nvc);
177static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
178static int nv_sgp_write_data(nv_ctl_t *nvc);
179static void nv_sgp_activity_led_ctl(void *arg);
180static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
181static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
182static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
183static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
184static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
185static void nv_sgp_cleanup(nv_ctl_t *nvc);
186#endif
187
188
189/*
190 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
191 * Verify whether it is needed if ported to another ISA.
192 */
193static ddi_dma_attr_t buffer_dma_attr = {
194	DMA_ATTR_V0,		/* dma_attr_version */
195	0,			/* dma_attr_addr_lo: lowest bus address */
196	0xffffffffull,		/* dma_attr_addr_hi: */
197	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e. for one cookie */
198	4,			/* dma_attr_align */
199	1,			/* dma_attr_burstsizes. */
200	1,			/* dma_attr_minxfer */
201	0xffffffffull,		/* dma_attr_max xfer including all cookies */
202	0xffffffffull,		/* dma_attr_seg */
203	NV_DMA_NSEGS,		/* dma_attr_sgllen */
204	512,			/* dma_attr_granular */
205	0,			/* dma_attr_flags */
206};
207
208
209/*
210 * DMA attributes for PRD tables
211 */
212ddi_dma_attr_t nv_prd_dma_attr = {
213	DMA_ATTR_V0,		/* dma_attr_version */
214	0,			/* dma_attr_addr_lo */
215	0xffffffffull,		/* dma_attr_addr_hi */
216	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
217	4,			/* dma_attr_align */
218	1,			/* dma_attr_burstsizes */
219	1,			/* dma_attr_minxfer */
220	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
221	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
222	1,			/* dma_attr_sgllen */
223	1,			/* dma_attr_granular */
224	0			/* dma_attr_flags */
225};
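/*
 * The settings above reflect the standard bus master IDE programming model:
 * the PRD (Physical Region Descriptor) table must be 4-byte aligned, reside
 * below 4GB, and must not cross a 64K boundary.
 */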
226
227/*
228 * Device access attributes
229 */
230static ddi_device_acc_attr_t accattr = {
231	DDI_DEVICE_ATTR_V0,
232	DDI_STRUCTURE_LE_ACC,
233	DDI_STRICTORDER_ACC
234};
235
236
237#ifdef SGPIO_SUPPORT
238static struct cb_ops nv_cb_ops = {
239	nv_open,		/* open */
240	nv_close,		/* close */
241	nodev,			/* strategy (block) */
242	nodev,			/* print (block) */
243	nodev,			/* dump (block) */
244	nodev,			/* read */
245	nodev,			/* write */
246	nv_ioctl,		/* ioctl */
247	nodev,			/* devmap */
248	nodev,			/* mmap */
249	nodev,			/* segmap */
250	nochpoll,		/* chpoll */
251	ddi_prop_op,		/* prop_op */
252	NULL,			/* streams */
253	D_NEW | D_MP |
254	D_64BIT | D_HOTPLUG,	/* flags */
255	CB_REV			/* rev */
256};
257#endif  /* SGPIO_SUPPORT */
258
259
260static struct dev_ops nv_dev_ops = {
261	DEVO_REV,		/* devo_rev */
262	0,			/* refcnt  */
263	nv_getinfo,		/* info */
264	nulldev,		/* identify */
265	nulldev,		/* probe */
266	nv_attach,		/* attach */
267	nv_detach,		/* detach */
268	nodev,			/* no reset */
269#ifdef SGPIO_SUPPORT
270	&nv_cb_ops,		/* driver operations */
271#else
272	(struct cb_ops *)0,	/* driver operations */
273#endif
274	NULL,			/* bus operations */
275	NULL,			/* power */
276	nv_quiesce		/* quiesce */
277};
278
279
280/*
281 * Request Sense CDB for ATAPI
282 */
283static const uint8_t nv_rqsense_cdb[16] = {
284	SCMD_REQUEST_SENSE,
285	0,
286	0,
287	0,
288	SATA_ATAPI_MIN_RQSENSE_LEN,
289	0,
290	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
291};
292
293
294static sata_tran_hotplug_ops_t nv_hotplug_ops;
295
296extern struct mod_ops mod_driverops;
297
298static  struct modldrv modldrv = {
299	&mod_driverops,	/* driverops */
300	"Nvidia ck804/mcp55 HBA",
301	&nv_dev_ops,	/* driver ops */
302};
303
304static  struct modlinkage modlinkage = {
305	MODREV_1,
306	&modldrv,
307	NULL
308};
309
310
311/*
312 * wait between checks of reg status
313 */
314int nv_usec_delay = NV_WAIT_REG_CHECK;
315
316/*
317 * The following is needed for nv_vcmn_err()
318 */
319static kmutex_t nv_log_mutex; /* protects nv_log_buf */
320static char nv_log_buf[NV_STRING_512];
321int nv_debug_flags = NVDBG_ALWAYS;
322int nv_log_to_console = B_FALSE;
323
324int nv_log_delay = 0;
325int nv_prom_print = B_FALSE;
326
327/*
328 * for debugging
329 */
330#ifdef DEBUG
331int ncq_commands = 0;
332int non_ncq_commands = 0;
333#endif
334
335/*
336 * Opaque state pointer to be initialized by ddi_soft_state_init()
337 */
338static void *nv_statep	= NULL;
339
340
341static sata_tran_hotplug_ops_t nv_hotplug_ops = {
342	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
343	nv_sata_activate,	/* activate port. cfgadm -c connect */
344	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
345};
346
347
348/*
349 *  nv module initialization
350 */
351int
352_init(void)
353{
354	int	error;
355
356	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
357
358	if (error != 0) {
359
360		return (error);
361	}
362
363	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
364
365	if ((error = sata_hba_init(&modlinkage)) != 0) {
366		ddi_soft_state_fini(&nv_statep);
367		mutex_destroy(&nv_log_mutex);
368
369		return (error);
370	}
371
372	error = mod_install(&modlinkage);
373	if (error != 0) {
374		sata_hba_fini(&modlinkage);
375		ddi_soft_state_fini(&nv_statep);
376		mutex_destroy(&nv_log_mutex);
377
378		return (error);
379	}
380
381	return (error);
382}
383
384
385/*
386 * nv module uninitialize
387 */
388int
389_fini(void)
390{
391	int	error;
392
393	error = mod_remove(&modlinkage);
394
395	if (error != 0) {
396		return (error);
397	}
398
399	/*
400	 * remove the resources allocated in _init()
401	 */
402	mutex_destroy(&nv_log_mutex);
403	sata_hba_fini(&modlinkage);
404	ddi_soft_state_fini(&nv_statep);
405
406	return (error);
407}
408
409
410/*
411 * nv _info entry point
412 */
413int
414_info(struct modinfo *modinfop)
415{
416	return (mod_info(&modlinkage, modinfop));
417}
418
419
420/*
421 * these wrappers for ddi_{get,put}{8,16,32} are for observability
422 * with dtrace
423 */
424#ifdef DEBUG
425
426static void
427nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
428{
429	ddi_put8(handle, dev_addr, value);
430}
431
432static void
433nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
434{
435	ddi_put32(handle, dev_addr, value);
436}
437
438static uint32_t
439nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
440{
441	return (ddi_get32(handle, dev_addr));
442}
443
444static void
445nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
446{
447	ddi_put16(handle, dev_addr, value);
448}
449
450static uint16_t
451nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
452{
453	return (ddi_get16(handle, dev_addr));
454}
455
456static uint8_t
457nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
458{
459	return (ddi_get8(handle, dev_addr));
460}
461
462#else
463
464#define	nv_put8 ddi_put8
465#define	nv_put32 ddi_put32
466#define	nv_get32 ddi_get32
467#define	nv_put16 ddi_put16
468#define	nv_get16 ddi_get16
469#define	nv_get8 ddi_get8
470
471#endif
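/*
 * For example, in a DEBUG build (where nv_put8() and friends are functions
 * rather than the macro aliases above), device register writes can be
 * observed with something like:
 *
 *	dtrace -n 'fbt::nv_put8:entry { printf("value 0x%x", arg2); }'
 */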
472
473
474/*
475 * Driver attach
476 */
477static int
478nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
479{
480	int status, attach_state, intr_types, bar, i, command;
481	int inst = ddi_get_instance(dip);
482	ddi_acc_handle_t pci_conf_handle;
483	nv_ctl_t *nvc;
484	uint8_t subclass;
485	uint32_t reg32;
486#ifdef SGPIO_SUPPORT
487	pci_regspec_t *regs;
488	int rlen;
489#endif
490
491	switch (cmd) {
492
493	case DDI_ATTACH:
494
495		NVLOG((NVDBG_INIT, NULL, NULL,
496		    "nv_attach(): DDI_ATTACH inst %d", inst));
497
498		attach_state = ATTACH_PROGRESS_NONE;
499
500		status = ddi_soft_state_zalloc(nv_statep, inst);
501
502		if (status != DDI_SUCCESS) {
503			break;
504		}
505
506		nvc = ddi_get_soft_state(nv_statep, inst);
507
508		nvc->nvc_dip = dip;
509
510		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
511
512		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
513			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
514			    PCI_CONF_REVID);
515			NVLOG((NVDBG_INIT, NULL, NULL,
516			    "inst %d: silicon revid is %x nv_debug_flags=%x",
517			    inst, nvc->nvc_revid, nv_debug_flags));
518		} else {
519			break;
520		}
521
522		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
523
524		/*
525		 * If a device is attached after a suspend/resume, sometimes
526		 * the command register is zero, as it might not be set by
527		 * BIOS or a parent.  Set it again here.
528		 */
529		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
530
531		if (command == 0) {
532			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
533			    " register", inst);
534			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
535			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
536		}
537
538		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
539
540		if (subclass & PCI_MASS_RAID) {
541			cmn_err(CE_WARN,
542			    "attach failed: RAID mode not supported");
543			break;
544		}
545
546		/*
547		 * the 6 bars of the controller are:
548		 * 0: port 0 task file
549		 * 1: port 0 status
550		 * 2: port 1 task file
551		 * 3: port 1 status
552		 * 4: bus master for both ports
553		 * 5: extended registers for SATA features
554		 */
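		/*
		 * Note that register set 0 of the "reg" property describes
		 * PCI configuration space, so register set bar + 1 below maps
		 * BAR number bar.
		 */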
555		for (bar = 0; bar < 6; bar++) {
556			status = ddi_regs_map_setup(dip, bar + 1,
557			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
558			    &nvc->nvc_bar_hdl[bar]);
559
560			if (status != DDI_SUCCESS) {
561				NVLOG((NVDBG_INIT, nvc, NULL,
562				    "ddi_regs_map_setup failure for bar"
563				    " %d status = %d", bar, status));
564				break;
565			}
566		}
567
568		attach_state |= ATTACH_PROGRESS_BARS;
569
570		/*
571		 * initialize controller and driver core
572		 */
573		status = nv_init_ctl(nvc, pci_conf_handle);
574
575		if (status == NV_FAILURE) {
576			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
577
578			break;
579		}
580
581		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
582
583		/*
584		 * initialize mutexes
585		 */
586		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
587		    DDI_INTR_PRI(nvc->nvc_intr_pri));
588
589		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
590
591		/*
592		 * get supported interrupt types
593		 */
594		if (ddi_intr_get_supported_types(dip, &intr_types) !=
595		    DDI_SUCCESS) {
596			nv_cmn_err(CE_WARN, nvc, NULL,
597			    "!ddi_intr_get_supported_types failed");
598			NVLOG((NVDBG_INIT, nvc, NULL,
599			    "interrupt supported types failed"));
600
601			break;
602		}
603
604		NVLOG((NVDBG_INIT, nvc, NULL,
605		    "ddi_intr_get_supported_types() returned: 0x%x",
606		    intr_types));
607
608#ifdef NV_MSI_SUPPORTED
609		if (intr_types & DDI_INTR_TYPE_MSI) {
610			NVLOG((NVDBG_INIT, nvc, NULL,
611			    "using MSI interrupt type"));
612
613			/*
614			 * Try MSI first, but fall back to legacy if MSI
615			 * attach fails
616			 */
617			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
618				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
619				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
620				NVLOG((NVDBG_INIT, nvc, NULL,
621				    "MSI interrupt setup done"));
622			} else {
623				nv_cmn_err(CE_CONT, nvc, NULL,
624				    "!MSI registration failed "
625				    "will try Legacy interrupts");
626			}
627		}
628#endif
629
630		/*
631		 * Either the MSI interrupt setup has failed or only
632		 * the fixed interrupts are available on the system.
633		 */
634		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
635		    (intr_types & DDI_INTR_TYPE_FIXED)) {
636
637			NVLOG((NVDBG_INIT, nvc, NULL,
638			    "using Legacy interrupt type"));
639
640			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
641				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
642				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
643				NVLOG((NVDBG_INIT, nvc, NULL,
644				    "Legacy interrupt setup done"));
645			} else {
646				nv_cmn_err(CE_WARN, nvc, NULL,
647				    "!legacy interrupt setup failed");
648				NVLOG((NVDBG_INIT, nvc, NULL,
649				    "legacy interrupt setup failed"));
650				break;
651			}
652		}
653
654		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
655			NVLOG((NVDBG_INIT, nvc, NULL,
656			    "no interrupts registered"));
657			break;
658		}
659
660#ifdef SGPIO_SUPPORT
661		/*
662		 * save off the controller number
663		 */
664		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
665		    "reg", (caddr_t)&regs, &rlen);
666		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
667		kmem_free(regs, rlen);
668
669		/*
670		 * initialize SGPIO
671		 */
672		nv_sgp_led_init(nvc, pci_conf_handle);
673#endif	/* SGPIO_SUPPORT */
674
675		/*
676		 * attach to sata module
677		 */
678		if (sata_hba_attach(nvc->nvc_dip,
679		    &nvc->nvc_sata_hba_tran,
680		    DDI_ATTACH) != DDI_SUCCESS) {
681			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
682
683			break;
684		}
685
686		pci_config_teardown(&pci_conf_handle);
687
688		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
689
690		return (DDI_SUCCESS);
691
692	case DDI_RESUME:
693
694		nvc = ddi_get_soft_state(nv_statep, inst);
695
696		NVLOG((NVDBG_INIT, nvc, NULL,
697		    "nv_attach(): DDI_RESUME inst %d", inst));
698
699		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
700			return (DDI_FAILURE);
701		}
702
703		/*
704		 * If a device is attached after a suspend/resume, sometimes
705		 * the command register is zero, as it might not be set by
706		 * BIOS or a parent.  Set it again here.
707		 */
708		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
709
710		if (command == 0) {
711			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
712			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
713		}
714
715		/*
716		 * Need to set bit 2 to 1 at config offset 0x50
717		 * to enable access to the bar5 registers.
718		 */
719		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
720
721		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
722			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
723			    reg32 | NV_BAR5_SPACE_EN);
724		}
725
726		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
727
728		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
729			nv_resume(&(nvc->nvc_port[i]));
730		}
731
732		pci_config_teardown(&pci_conf_handle);
733
734		return (DDI_SUCCESS);
735
736	default:
737		return (DDI_FAILURE);
738	}
739
740
741	/*
742	 * DDI_ATTACH failure path starts here
743	 */
744
745	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
746		nv_rem_intrs(nvc);
747	}
748
749	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
750		/*
751		 * Remove timers
752		 */
753		int port = 0;
754		nv_port_t *nvp;
755
756		for (; port < NV_MAX_PORTS(nvc); port++) {
757			nvp = &(nvc->nvc_port[port]);
758			if (nvp->nvp_timeout_id != 0) {
759				(void) untimeout(nvp->nvp_timeout_id);
760			}
761		}
762	}
763
764	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
765		mutex_destroy(&nvc->nvc_mutex);
766	}
767
768	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
769		nv_uninit_ctl(nvc);
770	}
771
772	if (attach_state & ATTACH_PROGRESS_BARS) {
773		while (--bar >= 0) {
774			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
775		}
776	}
777
778	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
779		ddi_soft_state_free(nv_statep, inst);
780	}
781
782	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
783		pci_config_teardown(&pci_conf_handle);
784	}
785
786	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
787
788	return (DDI_FAILURE);
789}
790
791
792static int
793nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
794{
795	int i, port, inst = ddi_get_instance(dip);
796	nv_ctl_t *nvc;
797	nv_port_t *nvp;
798
799	nvc = ddi_get_soft_state(nv_statep, inst);
800
801	switch (cmd) {
802
803	case DDI_DETACH:
804
805		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
806
807		/*
808		 * Remove interrupts
809		 */
810		nv_rem_intrs(nvc);
811
812		/*
813		 * Remove timers
814		 */
815		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
816			nvp = &(nvc->nvc_port[port]);
817			if (nvp->nvp_timeout_id != 0) {
818				(void) untimeout(nvp->nvp_timeout_id);
819			}
820		}
821
822		/*
823		 * Remove maps
824		 */
825		for (i = 0; i < 6; i++) {
826			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
827		}
828
829		/*
830		 * Destroy mutexes
831		 */
832		mutex_destroy(&nvc->nvc_mutex);
833
834		/*
835		 * Uninitialize the controller
836		 */
837		nv_uninit_ctl(nvc);
838
839#ifdef SGPIO_SUPPORT
840		/*
841		 * release SGPIO resources
842		 */
843		nv_sgp_cleanup(nvc);
844#endif
845
846		/*
847		 * unregister from the sata module
848		 */
849		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
850
851		/*
852		 * Free soft state
853		 */
854		ddi_soft_state_free(nv_statep, inst);
855
856		return (DDI_SUCCESS);
857
858	case DDI_SUSPEND:
859		/*
860		 * The PM functions for suspend and resume are incomplete
861		 * and need additional work.  They may or may not work in
862		 * the current state.
863		 */
864		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
865
866		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
867			nv_suspend(&(nvc->nvc_port[i]));
868		}
869
870		nvc->nvc_state |= NV_CTRL_SUSPEND;
871
872		return (DDI_SUCCESS);
873
874	default:
875		return (DDI_FAILURE);
876	}
877}
878
879
880/*ARGSUSED*/
881static int
882nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
883{
884	nv_ctl_t *nvc;
885	int instance;
886	dev_t dev;
887
888	dev = (dev_t)arg;
889	instance = getminor(dev);
890
891	switch (infocmd) {
892	case DDI_INFO_DEVT2DEVINFO:
893		nvc = ddi_get_soft_state(nv_statep,  instance);
894		if (nvc != NULL) {
895			*result = nvc->nvc_dip;
896			return (DDI_SUCCESS);
897		} else {
898			*result = NULL;
899			return (DDI_FAILURE);
900		}
901	case DDI_INFO_DEVT2INSTANCE:
902		*(int *)result = instance;
903		break;
904	default:
905		break;
906	}
907	return (DDI_SUCCESS);
908}
909
910
911#ifdef SGPIO_SUPPORT
912/* ARGSUSED */
913static int
914nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
915{
916	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
917
918	if (nvc == NULL) {
919		return (ENXIO);
920	}
921
922	return (0);
923}
924
925
926/* ARGSUSED */
927static int
928nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
929{
930	return (0);
931}
932
933
934/* ARGSUSED */
935static int
936nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
937{
938	nv_ctl_t *nvc;
939	int inst;
940	int status;
941	int ctlr, port;
942	int drive;
943	uint8_t curr_led;
944	struct dc_led_ctl led;
945
946	inst = getminor(dev);
947	if (inst == -1) {
948		return (EBADF);
949	}
950
951	nvc = ddi_get_soft_state(nv_statep, inst);
952	if (nvc == NULL) {
953		return (EBADF);
954	}
955
956	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
957		return (EIO);
958	}
959
960	switch (cmd) {
961	case DEVCTL_SET_LED:
962		status = ddi_copyin((void *)arg, &led,
963		    sizeof (struct dc_led_ctl), mode);
964		if (status != 0)
965			return (EFAULT);
966
967		/*
968		 * Since only the first two controllers currently support
969		 * SGPIO (as per NVIDIA docs), this code does as well.
970		 * Note that this validates the port value within led_state
971		 * as well.
972		 */
973
974		ctlr = SGP_DRV_TO_CTLR(led.led_number);
975		if ((ctlr != 0) && (ctlr != 1))
976			return (ENXIO);
977
978		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
979		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
980			return (EINVAL);
981		}
982
983		drive = led.led_number;
984
985		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
986		    (led.led_state == DCL_STATE_OFF)) {
987
988			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
989				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
990			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
991				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
992			} else {
993				return (ENXIO);
994			}
995
996			port = SGP_DRV_TO_PORT(led.led_number);
997			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
998		}
999
1000		if (led.led_ctl_active == DCL_CNTRL_ON) {
1001			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1002				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1003			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1004				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1005			} else {
1006				return (ENXIO);
1007			}
1008
1009			port = SGP_DRV_TO_PORT(led.led_number);
1010			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1011		}
1012
1013		break;
1014
1015	case DEVCTL_GET_LED:
1016		status = ddi_copyin((void *)arg, &led,
1017		    sizeof (struct dc_led_ctl), mode);
1018		if (status != 0)
1019			return (EFAULT);
1020
1021		/*
1022		 * Since only the first two controllers currently support
1023		 * SGPIO (as per NVIDIA docs), this code does as well.
1024		 * Note that this validates the port value within led_state
1025		 * as well.
1026		 */
1027
1028		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1029		if ((ctlr != 0) && (ctlr != 1))
1030			return (ENXIO);
1031
1032		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1033		    led.led_number);
1034
1035		port = SGP_DRV_TO_PORT(led.led_number);
1036		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1037			led.led_ctl_active = DCL_CNTRL_ON;
1038
1039			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1040				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1041					led.led_state = DCL_STATE_OFF;
1042				else
1043					led.led_state = DCL_STATE_ON;
1044			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1045				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1046					led.led_state = DCL_STATE_OFF;
1047				else
1048					led.led_state = DCL_STATE_ON;
1049			} else {
1050				return (ENXIO);
1051			}
1052		} else {
1053			led.led_ctl_active = DCL_CNTRL_OFF;
1054			/*
1055			 * Not really off, but never set and no constant for
1056			 * tri-state
1057			 */
1058			led.led_state = DCL_STATE_OFF;
1059		}
1060
1061		status = ddi_copyout(&led, (void *)arg,
1062		    sizeof (struct dc_led_ctl), mode);
1063		if (status != 0)
1064			return (EFAULT);
1065
1066		break;
1067
1068	case DEVCTL_NUM_LEDS:
1069		led.led_number = SGPIO_DRV_CNT_VALUE;
1070		led.led_ctl_active = 1;
1071		led.led_type = 3;
1072
1073		/*
1074		 * According to documentation, NVIDIA SGPIO is supposed to
1075		 * support blinking, but it does not seem to work in practice.
1076		 */
1077		led.led_state = DCL_STATE_ON;
1078
1079		status = ddi_copyout(&led, (void *)arg,
1080		    sizeof (struct dc_led_ctl), mode);
1081		if (status != 0)
1082			return (EFAULT);
1083
1084		break;
1085
1086	default:
1087		return (EINVAL);
1088	}
1089
1090	return (0);
1091}
1092#endif	/* SGPIO_SUPPORT */
1093
1094
1095/*
1096 * Called by sata module to probe a port.  Port and device state
1097 * are not changed here... only reported back to the sata module.
1098 *
1099 * If probe confirms a device is present for the first time, it will
1100 * initiate a device reset, then probe will be called again and the
1101 * signature will be checked.  If the signature is valid, data structures
1102 * will be initialized.
1103 */
1104static int
1105nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1106{
1107	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1108	uint8_t cport = sd->satadev_addr.cport;
1109	uint8_t pmport = sd->satadev_addr.pmport;
1110	uint8_t qual = sd->satadev_addr.qual;
1111	clock_t nv_lbolt = ddi_get_lbolt();
1112	nv_port_t *nvp;
1113
1114	if (cport >= NV_MAX_PORTS(nvc)) {
1115		sd->satadev_type = SATA_DTYPE_NONE;
1116		sd->satadev_state = SATA_STATE_UNKNOWN;
1117
1118		return (SATA_FAILURE);
1119	}
1120
1121	ASSERT(nvc->nvc_port != NULL);
1122	nvp = &(nvc->nvc_port[cport]);
1123	ASSERT(nvp != NULL);
1124
1125	NVLOG((NVDBG_PROBE, nvc, nvp,
1126	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1127	    "qual: 0x%x", cport, pmport, qual));
1128
1129	mutex_enter(&nvp->nvp_mutex);
1130
1131	/*
1132	 * This check seems to be done in the SATA module.
1133	 * It may not be required here
1134	 */
1135	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1136		nv_cmn_err(CE_WARN, nvc, nvp,
1137		    "port inactive.  Use cfgadm to activate");
1138		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1139		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1140		mutex_exit(&nvp->nvp_mutex);
1141
1142		return (SATA_FAILURE);
1143	}
1144
1145	if (qual == SATA_ADDR_PMPORT) {
1146		sd->satadev_type = SATA_DTYPE_NONE;
1147		sd->satadev_state = SATA_STATE_UNKNOWN;
1148		mutex_exit(&nvp->nvp_mutex);
1149		nv_cmn_err(CE_WARN, nvc, nvp,
1150		    "controller does not support port multiplier");
1151
1152		return (SATA_FAILURE);
1153	}
1154
1155	sd->satadev_state = SATA_PSTATE_PWRON;
1156
1157	nv_copy_registers(nvp, sd, NULL);
1158
1159	/*
1160	 * determine link status
1161	 */
1162	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1163		uint8_t det;
1164
1165		/*
1166		 * Reset will cause the link to go down for a short period of
1167		 * time.  If the link is lost for less than 2 seconds, ignore it
1168		 * so that the reset can progress.
1169		 */
1170		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1171
1172			if (nvp->nvp_link_lost_time == 0) {
1173				nvp->nvp_link_lost_time = nv_lbolt;
1174			}
1175
1176			if (TICK_TO_SEC(nv_lbolt -
1177			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1178				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1179				    "probe: intermittent link lost while"
1180				    " resetting"));
1181				/*
1182				 * fake status of link so that probe continues
1183				 */
1184				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1185				    SSTATUS_IPM_ACTIVE);
1186				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1187				    SSTATUS_DET_DEVPRE_PHYCOM);
1188				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1189				mutex_exit(&nvp->nvp_mutex);
1190
1191				return (SATA_SUCCESS);
1192			} else {
1193				nvp->nvp_state &=
1194				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1195			}
1196		}
1197
1198		/*
1199		 * no link, so tear down port and abort all active packets
1200		 */
1201
1202		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1203		    SSTATUS_DET_SHIFT;
1204
1205		switch (det) {
1206		case SSTATUS_DET_NODEV:
1207		case SSTATUS_DET_PHYOFFLINE:
1208			sd->satadev_type = SATA_DTYPE_NONE;
1209			break;
1210		default:
1211			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1212			break;
1213		}
1214
1215		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1216		    "probe: link lost invoking nv_abort_active"));
1217
1218		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1219		nv_uninit_port(nvp);
1220
1221		mutex_exit(&nvp->nvp_mutex);
1222
1223		return (SATA_SUCCESS);
1224	} else {
1225		nvp->nvp_link_lost_time = 0;
1226	}
1227
1228	/*
1229	 * A device is present so clear hotremoved flag
1230	 */
1231	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1232
1233#ifdef SGPIO_SUPPORT
1234	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1235	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1236#endif
1237
1238	/*
1239	 * If the signature was acquired previously there is no need to
1240	 * do it again.
1241	 */
1242	if (nvp->nvp_signature != 0) {
1243		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1244		    "probe: signature acquired previously"));
1245		sd->satadev_type = nvp->nvp_type;
1246		mutex_exit(&nvp->nvp_mutex);
1247
1248		return (SATA_SUCCESS);
1249	}
1250
1251	/*
1252	 * If NV_PORT_RESET is not set, this is the first time through
1253	 * so perform reset and return.
1254	 */
1255	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1256		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1257		    "probe: first reset to get sig"));
1258		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1259		nv_reset(nvp);
1260		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1261		nvp->nvp_probe_time = nv_lbolt;
1262		mutex_exit(&nvp->nvp_mutex);
1263
1264		return (SATA_SUCCESS);
1265	}
1266
1267	/*
1268	 * Reset was done previously.  see if the signature is
1269	 * available.
1270	 */
1271	nv_read_signature(nvp);
1272	sd->satadev_type = nvp->nvp_type;
1273
1274	/*
1275	 * Some drives may require additional resets to get a
1276	 * valid signature.  If a drive was not just powered up, the signature
1277	 * should arrive within half a second of reset.  Therefore if more
1278	 * than 5 seconds have elapsed while waiting for a signature, reset
1279	 * again.  These extra resets do not appear to create problems when
1280	 * the drive is spinning up for more than this reset period.
1281	 */
1282	if (nvp->nvp_signature == 0) {
1283		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1284			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1285			    " during signature acquisition"));
1286			nv_reset(nvp);
1287		}
1288
1289		mutex_exit(&nvp->nvp_mutex);
1290
1291		return (SATA_SUCCESS);
1292	}
1293
1294	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1295	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1296
1297	/*
1298	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1299	 * it is neither of those, just return.
1300	 */
1301	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1302	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1303		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1304		    " disks/CDs/DVDs.  Signature acquired was %X",
1305		    nvp->nvp_signature));
1306		mutex_exit(&nvp->nvp_mutex);
1307
1308		return (SATA_SUCCESS);
1309	}
1310
1311	/*
1312	 * make sure structures are initialized
1313	 */
1314	if (nv_init_port(nvp) == NV_SUCCESS) {
1315		NVLOG((NVDBG_PROBE, nvc, nvp,
1316		    "device detected and set up at port %d", cport));
1317		mutex_exit(&nvp->nvp_mutex);
1318
1319		return (SATA_SUCCESS);
1320	} else {
1321		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1322		    "structures for port %d", cport);
1323		mutex_exit(&nvp->nvp_mutex);
1324
1325		return (SATA_FAILURE);
1326	}
1327	/*NOTREACHED*/
1328}
1329
1330
1331/*
1332 * Called by sata module to start a new command.
1333 */
1334static int
1335nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1336{
1337	int cport = spkt->satapkt_device.satadev_addr.cport;
1338	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1339	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1340	int ret;
1341
1342	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1343	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1344
1345	mutex_enter(&nvp->nvp_mutex);
1346
1347	/*
1348	 * hotremoved is an intermediate state where the link was lost,
1349	 * but the hotplug event has not yet been processed by the sata
1350	 * module.  Fail the request.
1351	 */
1352	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1353		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1354		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1355		NVLOG((NVDBG_ERRS, nvc, nvp,
1356		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1357		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1358		mutex_exit(&nvp->nvp_mutex);
1359
1360		return (SATA_TRAN_PORT_ERROR);
1361	}
1362
1363	if (nvp->nvp_state & NV_PORT_RESET) {
1364		NVLOG((NVDBG_ERRS, nvc, nvp,
1365		    "still waiting for reset completion"));
1366		spkt->satapkt_reason = SATA_PKT_BUSY;
1367		mutex_exit(&nvp->nvp_mutex);
1368
1369		/*
1370		 * If in panic, timeouts do not occur, so fake one
1371		 * so that the signature can be acquired to complete
1372		 * the reset handling.
1373		 */
1374		if (ddi_in_panic()) {
1375			nv_timeout(nvp);
1376		}
1377
1378		return (SATA_TRAN_BUSY);
1379	}
1380
1381	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1382		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1383		NVLOG((NVDBG_ERRS, nvc, nvp,
1384		    "nv_sata_start: SATA_DTYPE_NONE"));
1385		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1386		mutex_exit(&nvp->nvp_mutex);
1387
1388		return (SATA_TRAN_PORT_ERROR);
1389	}
1390
1391	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1392		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1393		nv_cmn_err(CE_WARN, nvc, nvp,
1394		    "port multipliers not supported by controller");
1395		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1396		mutex_exit(&nvp->nvp_mutex);
1397
1398		return (SATA_TRAN_CMD_UNSUPPORTED);
1399	}
1400
1401	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1402		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1403		NVLOG((NVDBG_ERRS, nvc, nvp,
1404		    "nv_sata_start: port not yet initialized"));
1405		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1406		mutex_exit(&nvp->nvp_mutex);
1407
1408		return (SATA_TRAN_PORT_ERROR);
1409	}
1410
1411	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1412		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1413		NVLOG((NVDBG_ERRS, nvc, nvp,
1414		    "nv_sata_start: NV_PORT_INACTIVE"));
1415		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1416		mutex_exit(&nvp->nvp_mutex);
1417
1418		return (SATA_TRAN_PORT_ERROR);
1419	}
1420
1421	if (nvp->nvp_state & NV_PORT_FAILED) {
1422		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1423		NVLOG((NVDBG_ERRS, nvc, nvp,
1424		    "nv_sata_start: NV_PORT_FAILED state"));
1425		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1426		mutex_exit(&nvp->nvp_mutex);
1427
1428		return (SATA_TRAN_PORT_ERROR);
1429	}
1430
1431	/*
1432	 * After a device reset, when sata module restore processing is
1433	 * complete, the sata module will set sata_clear_dev_reset, which
1434	 * indicates that restore processing has completed and normal
1435	 * non-restore related commands should be processed.
1436	 */
1437	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1438		nvp->nvp_state &= ~NV_PORT_RESTORE;
1439		NVLOG((NVDBG_ENTRY, nvc, nvp,
1440		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1441	}
1442
1443	/*
1444	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1445	 * marks such commands with sata_ignore_dev_reset.
1446	 *
1447	 * during coredump, nv_reset is called but then the restore
1448	 * doesn't happen.  For now, work around this by ignoring the wait for
1449	 * restore if the system is panicking.
1450	 * restore if the system is panicing.
1451	 */
1452	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1453	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1454	    (ddi_in_panic() == 0)) {
1455		spkt->satapkt_reason = SATA_PKT_BUSY;
1456		NVLOG((NVDBG_ENTRY, nvc, nvp,
1457		    "nv_sata_start: waiting for restore "));
1458		mutex_exit(&nvp->nvp_mutex);
1459
1460		return (SATA_TRAN_BUSY);
1461	}
1462
1463	if (nvp->nvp_state & NV_PORT_ABORTING) {
1464		spkt->satapkt_reason = SATA_PKT_BUSY;
1465		NVLOG((NVDBG_ERRS, nvc, nvp,
1466		    "nv_sata_start: NV_PORT_ABORTING"));
1467		mutex_exit(&nvp->nvp_mutex);
1468
1469		return (SATA_TRAN_BUSY);
1470	}
1471
1472	if (spkt->satapkt_op_mode &
1473	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1474
1475		ret = nv_start_sync(nvp, spkt);
1476
1477		mutex_exit(&nvp->nvp_mutex);
1478
1479		return (ret);
1480	}
1481
1482	/*
1483	 * start the command asynchronously
1484	 */
1485	ret = nv_start_async(nvp, spkt);
1486
1487	mutex_exit(&nvp->nvp_mutex);
1488
1489	return (ret);
1490}
1491
1492
1493/*
1494 * SATA_OPMODE_POLLING implies the driver is in a
1495 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1496 * If only SATA_OPMODE_SYNCH is set, the driver can use
1497 * interrupts and sleep wait on a cv.
1498 *
1499 * If SATA_OPMODE_POLLING is set, the driver can't use
1500 * interrupts and must busy wait and simulate the
1501 * interrupts by waiting for BSY to be cleared.
1502 *
1503 * Synchronous mode has to return BUSY if there are
1504 * any other commands already on the drive.
1505 */
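/*
 * In terms of the code below: POLLING|SYNCH busy waits in nv_poll_wait()
 * with interrupts disabled, SYNCH alone sleeps on nvp_poll_cv until the
 * interrupt completes the packet, and packets with neither flag are
 * dispatched through nv_start_async() instead.
 */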
1506static int
1507nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1508{
1509	nv_ctl_t *nvc = nvp->nvp_ctlp;
1510	int ret;
1511
1512	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1513
1514	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1515		spkt->satapkt_reason = SATA_PKT_BUSY;
1516		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1517		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1518		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1519		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1520		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1521
1522		return (SATA_TRAN_BUSY);
1523	}
1524
1525	/*
1526	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1527	 */
1528	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1529	    servicing_interrupt()) {
1530		spkt->satapkt_reason = SATA_PKT_BUSY;
1531		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1532		    "SYNC mode not allowed during interrupt"));
1533
1534		return (SATA_TRAN_BUSY);
1535
1536	}
1537
1538	/*
1539	 * disable interrupt generation if in polled mode
1540	 */
1541	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1542		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1543	}
1544
1545	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1546		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1547			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1548		}
1549
1550		return (ret);
1551	}
1552
1553	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1554		mutex_exit(&nvp->nvp_mutex);
1555		ret = nv_poll_wait(nvp, spkt);
1556		mutex_enter(&nvp->nvp_mutex);
1557
1558		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1559
1560		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1561		    " done ret: %d", ret));
1562
1563		return (ret);
1564	}
1565
1566	/*
1567	 * non-polling synchronous mode handling.  The interrupt will signal
1568	 * when the IO is completed.
1569	 */
1570	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1571
1572	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1573
1574		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1575	}
1576
1577	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1578	    " done reason %d", spkt->satapkt_reason));
1579
1580	return (SATA_TRAN_ACCEPTED);
1581}
1582
1583
1584static int
1585nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1586{
1587	int ret;
1588	nv_ctl_t *nvc = nvp->nvp_ctlp;
1589#if ! defined(__lock_lint)
1590	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1591#endif
1592
1593	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1594
1595	for (;;) {
1596
1597		NV_DELAY_NSEC(400);
1598
1599		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1600		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1601		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1602			mutex_enter(&nvp->nvp_mutex);
1603			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1604			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1605			nv_reset(nvp);
1606			nv_complete_io(nvp, spkt, 0);
1607			mutex_exit(&nvp->nvp_mutex);
1608			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1609			    "SATA_STATUS_BSY"));
1610
1611			return (SATA_TRAN_ACCEPTED);
1612		}
1613
1614		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1615
1616		/*
1617		 * Simulate interrupt.
1618		 */
1619		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1620		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1621
1622		if (ret != DDI_INTR_CLAIMED) {
1623			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1624			    " unclaimed -- resetting"));
1625			mutex_enter(&nvp->nvp_mutex);
1626			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1627			nv_reset(nvp);
1628			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1629			nv_complete_io(nvp, spkt, 0);
1630			mutex_exit(&nvp->nvp_mutex);
1631
1632			return (SATA_TRAN_ACCEPTED);
1633		}
1634
1635#if ! defined(__lock_lint)
1636		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1637			/*
1638			 * packet is complete
1639			 */
1640			return (SATA_TRAN_ACCEPTED);
1641		}
1642#endif
1643	}
1644	/*NOTREACHED*/
1645}
1646
1647
1648/*
1649 * Called by sata module to abort outstanding packets.
1650 */
1651/*ARGSUSED*/
1652static int
1653nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1654{
1655	int cport = spkt->satapkt_device.satadev_addr.cport;
1656	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1657	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1658	int c_a, ret;
1659
1660	ASSERT(cport < NV_MAX_PORTS(nvc));
1661	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1662
1663	mutex_enter(&nvp->nvp_mutex);
1664
1665	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1666		mutex_exit(&nvp->nvp_mutex);
1667		nv_cmn_err(CE_WARN, nvc, nvp,
1668		    "abort request failed: port inactive");
1669
1670		return (SATA_FAILURE);
1671	}
1672
1673	/*
1674	 * spkt == NULL then abort all commands
1675	 */
1676	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1677
1678	if (c_a) {
1679		NVLOG((NVDBG_ENTRY, nvc, nvp,
1680		    "packets aborted running=%d", c_a));
1681		ret = SATA_SUCCESS;
1682	} else {
1683		if (spkt == NULL) {
1684			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1685		} else {
1686			NVLOG((NVDBG_ENTRY, nvc, nvp,
1687			    "can't find spkt to abort"));
1688		}
1689		ret = SATA_FAILURE;
1690	}
1691
1692	mutex_exit(&nvp->nvp_mutex);
1693
1694	return (ret);
1695}
1696
1697
1698/*
1699 * if spkt == NULL abort all pkts running, otherwise
1700 * abort the requested packet.  must be called with nv_mutex
1701 * held and returns with it held.  Not NCQ aware.
1702 */
1703static int
1704nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1705{
1706	int aborted = 0, i, reset_once = B_FALSE;
1707	struct nv_slot *nv_slotp;
1708	sata_pkt_t *spkt_slot;
1709
1710	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1711
1712	/*
1713	 * return if the port is not configured
1714	 */
1715	if (nvp->nvp_slot == NULL) {
1716		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1717		    "nv_abort_active: not configured so returning"));
1718
1719		return (0);
1720	}
1721
1722	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1723
1724	nvp->nvp_state |= NV_PORT_ABORTING;
1725
1726	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1727
1728		nv_slotp = &(nvp->nvp_slot[i]);
1729		spkt_slot = nv_slotp->nvslot_spkt;
1730
1731		/*
1732		 * skip if not active command in slot
1733		 */
1734		if (spkt_slot == NULL) {
1735			continue;
1736		}
1737
1738		/*
1739		 * if a specific packet was requested, skip if
1740		 * this is not a match
1741		 */
1742		if ((spkt != NULL) && (spkt != spkt_slot)) {
1743			continue;
1744		}
1745
1746		/*
1747		 * stop the hardware.  This could need reworking
1748		 * when NCQ is enabled in the driver.
1749		 */
1750		if (reset_once == B_FALSE) {
1751			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1752
1753			/*
1754			 * stop DMA engine
1755			 */
1756			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1757
1758			nv_reset(nvp);
1759			reset_once = B_TRUE;
1760		}
1761
1762		spkt_slot->satapkt_reason = abort_reason;
1763		nv_complete_io(nvp, spkt_slot, i);
1764		aborted++;
1765	}
1766
1767	nvp->nvp_state &= ~NV_PORT_ABORTING;
1768
1769	return (aborted);
1770}
1771
1772
1773/*
1774 * Called by sata module to reset a port, device, or the controller.
1775 */
1776static int
1777nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1778{
1779	int cport = sd->satadev_addr.cport;
1780	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1781	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1782	int ret = SATA_SUCCESS;
1783
1784	ASSERT(cport < NV_MAX_PORTS(nvc));
1785
1786	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1787
1788	mutex_enter(&nvp->nvp_mutex);
1789
1790	switch (sd->satadev_addr.qual) {
1791
1792	case SATA_ADDR_CPORT:
1793		/*FALLTHROUGH*/
1794	case SATA_ADDR_DCPORT:
1795		nv_reset(nvp);
1796		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1797
1798		break;
1799	case SATA_ADDR_CNTRL:
1800		NVLOG((NVDBG_ENTRY, nvc, nvp,
1801		    "nv_sata_reset: controller reset not supported"));
1802
1803		break;
1804	case SATA_ADDR_PMPORT:
1805	case SATA_ADDR_DPMPORT:
1806		NVLOG((NVDBG_ENTRY, nvc, nvp,
1807		    "nv_sata_reset: port multipliers not supported"));
1808		/*FALLTHROUGH*/
1809	default:
1810		/*
1811		 * unsupported case
1812		 */
1813		ret = SATA_FAILURE;
1814		break;
1815	}
1816
1817	if (ret == SATA_SUCCESS) {
1818		/*
1819		 * If the port is inactive, do a quiet reset and don't attempt
1820		 * to wait for reset completion or do any post reset processing
1821		 */
1822		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1823			nvp->nvp_state &= ~NV_PORT_RESET;
1824			nvp->nvp_reset_time = 0;
1825		}
1826
1827		/*
1828		 * clear the port failed flag
1829		 */
1830		nvp->nvp_state &= ~NV_PORT_FAILED;
1831	}
1832
1833	mutex_exit(&nvp->nvp_mutex);
1834
1835	return (ret);
1836}
1837
1838
1839/*
1840 * Sata entry point to handle port activation.  cfgadm -c connect
1841 */
1842static int
1843nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1844{
1845	int cport = sd->satadev_addr.cport;
1846	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1847	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1848
1849	ASSERT(cport < NV_MAX_PORTS(nvc));
1850	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1851
1852	mutex_enter(&nvp->nvp_mutex);
1853
1854	sd->satadev_state = SATA_STATE_READY;
1855
1856	nv_copy_registers(nvp, sd, NULL);
1857
1858	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1859
1860	nvp->nvp_state = 0;
1861
1862	mutex_exit(&nvp->nvp_mutex);
1863
1864	return (SATA_SUCCESS);
1865}
1866
1867
1868/*
1869 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1870 */
1871static int
1872nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1873{
1874	int cport = sd->satadev_addr.cport;
1875	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1876	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1877
1878	ASSERT(cport < NV_MAX_PORTS(nvc));
1879	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1880
1881	mutex_enter(&nvp->nvp_mutex);
1882
1883	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1884
1885	/*
1886	 * mark the device as inaccessible
1887	 */
1888	nvp->nvp_state |= NV_PORT_INACTIVE;
1889
1890	/*
1891	 * disable the interrupts on port
1892	 */
1893	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1894
1895	nv_uninit_port(nvp);
1896
1897	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1898	nv_copy_registers(nvp, sd, NULL);
1899
1900	mutex_exit(&nvp->nvp_mutex);
1901
1902	return (SATA_SUCCESS);
1903}
1904
1905
1906/*
1907 * find an empty slot in the driver's queue, increment counters,
1908 * and then invoke the appropriate PIO or DMA start routine.
1909 */
1910static int
1911nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1912{
1913	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1914	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1915	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1916	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1917	nv_ctl_t *nvc = nvp->nvp_ctlp;
1918	nv_slot_t *nv_slotp;
1919	boolean_t dma_cmd;
1920
1921	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1922	    sata_cmdp->satacmd_cmd_reg));
1923
1924	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1925	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1926		nvp->nvp_ncq_run++;
1927		/*
1928		 * search for an empty NCQ slot.  By this time, it has already
1929		 * been determined by the caller that there is room on the
1930		 * queue.
1931		 */
1932		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1933		    on_bit <<= 1) {
1934			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1935				break;
1936			}
1937		}
1938
1939		/*
1940		 * the first empty slot found should not exceed the queue
1941		 * depth of the drive.  If it does, it's an error.
1942		 */
1943		ASSERT(slot != nvp->nvp_queue_depth);
1944
1945		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1946		    nvp->nvp_sactive);
1947		ASSERT((sactive & on_bit) == 0);
1948		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1949		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1950		    on_bit));
1951		nvp->nvp_sactive_cache |= on_bit;
1952
1953		ncq = NVSLOT_NCQ;
1954
1955	} else {
1956		nvp->nvp_non_ncq_run++;
1957		slot = 0;
1958	}
1959
1960	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1961
1962	ASSERT(nv_slotp->nvslot_spkt == NULL);
1963
1964	nv_slotp->nvslot_spkt = spkt;
1965	nv_slotp->nvslot_flags = ncq;
1966
1967	/*
1968	 * the sata module doesn't indicate which commands utilize the
1969	 * DMA engine, so find out using this switch table.
1970	 */
1971	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1972	case SATAC_READ_DMA_EXT:
1973	case SATAC_WRITE_DMA_EXT:
1974	case SATAC_WRITE_DMA:
1975	case SATAC_READ_DMA:
1976	case SATAC_READ_DMA_QUEUED:
1977	case SATAC_READ_DMA_QUEUED_EXT:
1978	case SATAC_WRITE_DMA_QUEUED:
1979	case SATAC_WRITE_DMA_QUEUED_EXT:
1980	case SATAC_READ_FPDMA_QUEUED:
1981	case SATAC_WRITE_FPDMA_QUEUED:
1982		dma_cmd = B_TRUE;
1983		break;
1984	default:
1985		dma_cmd = B_FALSE;
1986	}
1987
1988	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1989		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1990		nv_slotp->nvslot_start = nv_start_dma;
1991		nv_slotp->nvslot_intr = nv_intr_dma;
1992	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1993		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1994		nv_slotp->nvslot_start = nv_start_pkt_pio;
1995		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1996		if ((direction == SATA_DIR_READ) ||
1997		    (direction == SATA_DIR_WRITE)) {
1998			nv_slotp->nvslot_byte_count =
1999			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2000			nv_slotp->nvslot_v_addr =
2001			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2002			/*
2003			 * Freeing DMA resources allocated by the framework
2004			 * now to avoid buffer overwrite (dma sync) problems
2005			 * when the buffer is released at command completion.
2006			 * Primarily an issue on systems with more than
2007			 * 4GB of memory.
2008			 */
2009			sata_free_dma_resources(spkt);
2010		}
2011	} else if (direction == SATA_DIR_NODATA_XFER) {
2012		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2013		nv_slotp->nvslot_start = nv_start_nodata;
2014		nv_slotp->nvslot_intr = nv_intr_nodata;
2015	} else if (direction == SATA_DIR_READ) {
2016		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2017		nv_slotp->nvslot_start = nv_start_pio_in;
2018		nv_slotp->nvslot_intr = nv_intr_pio_in;
2019		nv_slotp->nvslot_byte_count =
2020		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2021		nv_slotp->nvslot_v_addr =
2022		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2023		/*
2024		 * Freeing DMA resources allocated by the framework now to
2025		 * avoid buffer overwrite (dma sync) problems when the buffer
2026		 * is released at command completion.  This is not an issue
2027		 * for write because write does not update the buffer.
2028		 * Primarily an issue on systems with more than 4GB of memory.
2029		 */
2030		sata_free_dma_resources(spkt);
2031	} else if (direction == SATA_DIR_WRITE) {
2032		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2033		nv_slotp->nvslot_start = nv_start_pio_out;
2034		nv_slotp->nvslot_intr = nv_intr_pio_out;
2035		nv_slotp->nvslot_byte_count =
2036		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2037		nv_slotp->nvslot_v_addr =
2038		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2039	} else {
2040		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2041		    " %d cookies %d cmd %x",
2042		    sata_cmdp->satacmd_flags.sata_data_direction,
2043		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2044		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2045		ret = SATA_TRAN_CMD_UNSUPPORTED;
2046
2047		goto fail;
2048	}
2049
2050	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2051	    SATA_TRAN_ACCEPTED) {
2052#ifdef SGPIO_SUPPORT
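		/*
		 * notify the SGPIO logic of drive activity (used for
		 * activity LEDs); the drive index is derived from the
		 * controller number and port number.
		 */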
2053		nv_sgp_drive_active(nvp->nvp_ctlp,
2054		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2055#endif
2056		nv_slotp->nvslot_stime = ddi_get_lbolt();
2057
2058		/*
2059		 * start timer if it's not already running and this packet
2060		 * is not requesting polled mode.
2061		 */
2062		if ((nvp->nvp_timeout_id == 0) &&
2063		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2064			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2065			    drv_usectohz(NV_ONE_SEC));
2066		}
2067
2068		return (SATA_TRAN_ACCEPTED);
2069	}
2070
2071	fail:
2072
2073	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2074
2075	if (ncq == NVSLOT_NCQ) {
2076		nvp->nvp_ncq_run--;
2077		nvp->nvp_sactive_cache &= ~on_bit;
2078	} else {
2079		nvp->nvp_non_ncq_run--;
2080	}
2081	nv_slotp->nvslot_spkt = NULL;
2082	nv_slotp->nvslot_flags = 0;
2083
2084	return (ret);
2085}
2086
2087
2088/*
2089 * Check if the signature is ready and, if non-zero, translate
2090 * it into a Solaris SATA-defined device type.
2091 */
2092static void
2093nv_read_signature(nv_port_t *nvp)
2094{
2095	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2096
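	/*
	 * the device signature is assembled from the taskfile registers
	 * (sector count and LBA low/mid/high) that the device sets
	 * following reset.
	 */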
2097	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2098	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2099	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2100	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2101
2102	switch (nvp->nvp_signature) {
2103
2104	case NV_SIG_DISK:
2105		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2106		nvp->nvp_type = SATA_DTYPE_ATADISK;
2107		break;
2108	case NV_SIG_ATAPI:
2109		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2110		    "drive is an optical device"));
2111		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2112		break;
2113	case NV_SIG_PM:
2114		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2115		    "device is a port multiplier"));
2116		nvp->nvp_type = SATA_DTYPE_PMULT;
2117		break;
2118	case NV_SIG_NOTREADY:
2119		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2120		    "signature not ready"));
2121		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2122		break;
2123	default:
2124		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2125		    " recognized", nvp->nvp_signature);
2126		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2127		break;
2128	}
2129
2130	if (nvp->nvp_signature) {
2131		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2132	}
2133}
2134
2135
2136/*
2137 * Reset the port
2138 */
2139static void
2140nv_reset(nv_port_t *nvp)
2141{
2142	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2143	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2144	nv_ctl_t *nvc = nvp->nvp_ctlp;
2145	uint32_t sctrl;
2146
2147	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2148
2149	ASSERT(mutex_owned(&nvp->nvp_mutex));
2150
2151	/*
2152	 * clear signature registers
2153	 */
2154	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2155	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2156	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2157	nv_put8(cmdhdl, nvp->nvp_count, 0);
2158
2159	nvp->nvp_signature = 0;
2160	nvp->nvp_type = 0;
2161	nvp->nvp_state |= NV_PORT_RESET;
2162	nvp->nvp_reset_time = ddi_get_lbolt();
2163	nvp->nvp_link_lost_time = 0;
2164
2165	/*
2166	 * assert reset in the PHY by writing a 1 to bit 0 of SControl
2167	 */
2168	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2169
2170	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2171
2172	/*
2173	 * wait 1ms
2174	 */
2175	drv_usecwait(1000);
2176
2177	/*
2178	 * de-assert reset in PHY
2179	 */
2180	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2181
2182	/*
2183	 * make sure timer is running
2184	 */
2185	if (nvp->nvp_timeout_id == 0) {
2186		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2187		    drv_usectohz(NV_ONE_SEC));
2188	}
2189}
2190
2191
2192/*
2193 * Initialize register handling specific to mcp55
2194 */
2195/* ARGSUSED */
2196static void
2197mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2198{
2199	nv_port_t *nvp;
2200	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2201	uint8_t off, port;
#ifdef NCQ
	uint32_t flags;
#endif
2202
2203	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
2204	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
2205
2206	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2207		nvp = &(nvc->nvc_port[port]);
2208		nvp->nvp_mcp55_int_status =
2209		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
2210		nvp->nvp_mcp55_int_ctl =
2211		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
2212
2213		/*
2214		 * clear any previous interrupts asserted
2215		 */
2216		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
2217		    MCP55_INT_CLEAR);
2218
2219		/*
2220		 * These are the interrupts to accept for now.  The spec
2221		 * says these are enable bits, but nvidia has indicated
2222		 * these are masking bits.  Even though they may be masked
2223		 * out to prevent asserting the main interrupt, they can
2224		 * still be asserted while reading the interrupt status
2225		 * register, so that needs to be considered in the interrupt
2226		 * handler.
2227		 */
2228		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
2229		    ~(MCP55_INT_IGNORE));
2230	}
2231
2232	/*
2233	 * Allow the driver to program the BM on the first command instead
2234	 * of waiting for an interrupt.
2235	 */
2236#ifdef NCQ
2237	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2238	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
2239	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2240	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
2241#endif
2242
2243
2244#if 0
2245	/*
2246	 * This caused problems on some but not all mcp55 based systems:
2247	 * DMA writes would never complete.  It happens even on small
2248	 * memory systems, and even when only setting NV_40BIT_PRD below
2249	 * and not buffer_dma_attr.dma_attr_addr_hi, so it seems to be a
2250	 * hardware issue that needs further investigation.
2251	 */
2252
2253	/*
2254	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2255	 * Enable DMA to take advantage of that.
2256	 *
2257	 */
2258	if (nvc->nvc_revid >= 0xa3) {
2259		uint32_t reg32;
2260		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
2261		    " is capable of 40-bit addressing", nvc->nvc_revid));
2262		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
2263		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2264		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2265		    reg32 |NV_40BIT_PRD);
2266	} else {
2267		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
2268		    "not capable of 40-bit addressing", nvc->nvc_revid));
2269	}
2270#endif
2271
2272}
2273
2274
2275/*
2276 * Initialize register handling specific to mcp04
2277 */
2278static void
2279mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2280{
2281	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2282	uint32_t reg32;
2283	uint16_t reg16;
2284	nv_port_t *nvp;
2285	int j;
2286
2287	/*
2288	 * delay hotplug interrupts until PHYRDY.
2289	 */
2290	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2291	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2292	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
2293
2294	/*
2295	 * enable hot plug interrupts for channel x and y
2296	 */
2297	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2298	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2299	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2300	    NV_HIRQ_EN | reg16);
2301
2302
2303	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2304	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2305	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2306	    NV_HIRQ_EN | reg16);
2307
2308	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
2309
2310	/*
2311	 * clear any existing interrupt pending then enable
2312	 */
2313	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2314		nvp = &(nvc->nvc_port[j]);
2315		mutex_enter(&nvp->nvp_mutex);
2316		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2317		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2318		mutex_exit(&nvp->nvp_mutex);
2319	}
2320}
2321
2322
2323/*
2324 * Initialize the controller and set up driver data structures.
2325 * determine if ck804 or mcp55 class.
2326 */
2327static int
2328nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2329{
2330	struct sata_hba_tran stran;
2331	nv_port_t *nvp;
2332	int j, ck804;
2333	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2334	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2335	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2336	uint32_t reg32;
2337	uint8_t reg8, reg8_save;
2338
2339	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2340
2341	ck804 = B_TRUE;
2342#ifdef SGPIO_SUPPORT
2343	nvc->nvc_mcp55_flag = B_FALSE;
2344#endif
2345
2346	/*
2347	 * Need to set bit 2 to 1 at config offset 0x50
2348	 * to enable access to the bar5 registers.
2349	 */
2350	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2351	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2352	    reg32 | NV_BAR5_SPACE_EN);
2353
2354	/*
2355	 * Determine if this is ck804 or mcp55.  ck804 maps the task file
2356	 * registers into bar5 while mcp55 does not; the corresponding
2357	 * offsets in mcp55's space are unused.  Check whether one of the
2358	 * task file registers is writable and reads back what was
2359	 * written.  mcp55 will not return the value written (0xff is
2360	 * returned instead), whereas ck804 will return the value written.
2361	 */
2362	reg8_save = nv_get8(bar5_hdl,
2363	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2364
2365
2366	for (j = 1; j < 3; j++) {
2367
2368		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2369		reg8 = nv_get8(bar5_hdl,
2370		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2371
2372		if (reg8 != j) {
2373			ck804 = B_FALSE;
2374			nvc->nvc_mcp55_flag = B_TRUE;
2375			break;
2376		}
2377	}
2378
2379	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2380
2381	if (ck804 == B_TRUE) {
2382		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2383		nvc->nvc_interrupt = mcp04_intr;
2384		nvc->nvc_reg_init = mcp04_reg_init;
2385		nvc->nvc_set_intr = mcp04_set_intr;
2386	} else {
2387		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2388		nvc->nvc_interrupt = mcp55_intr;
2389		nvc->nvc_reg_init = mcp55_reg_init;
2390		nvc->nvc_set_intr = mcp55_set_intr;
2391	}
2392
2393
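	/*
	 * fill in the sata_hba_tran structure describing this HBA's
	 * capabilities and entry points for the sata framework.
	 */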
2394	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2395	stran.sata_tran_hba_dip = nvc->nvc_dip;
2396	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2397	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2398	stran.sata_tran_hba_features_support =
2399	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2400	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2401	stran.sata_tran_probe_port = nv_sata_probe;
2402	stran.sata_tran_start = nv_sata_start;
2403	stran.sata_tran_abort = nv_sata_abort;
2404	stran.sata_tran_reset_dport = nv_sata_reset;
2405	stran.sata_tran_selftest = NULL;
2406	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2407	stran.sata_tran_pwrmgt_ops = NULL;
2408	stran.sata_tran_ioctl = NULL;
2409	nvc->nvc_sata_hba_tran = stran;
2410
2411	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2412	    KM_SLEEP);
2413
2414	/*
2415	 * initialize registers common to all chipsets
2416	 */
2417	nv_common_reg_init(nvc);
2418
2419	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2420		nvp = &(nvc->nvc_port[j]);
2421
2422		cmd_addr = nvp->nvp_cmd_addr;
2423		ctl_addr = nvp->nvp_ctl_addr;
2424		bm_addr = nvp->nvp_bm_addr;
2425
2426		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2427		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2428
2429		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2430
2431		nvp->nvp_data	= cmd_addr + NV_DATA;
2432		nvp->nvp_error	= cmd_addr + NV_ERROR;
2433		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2434		nvp->nvp_count	= cmd_addr + NV_COUNT;
2435		nvp->nvp_sect	= cmd_addr + NV_SECT;
2436		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2437		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2438		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2439		nvp->nvp_status	= cmd_addr + NV_STATUS;
2440		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2441		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2442		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2443
2444		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2445		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2446		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2447
2448		nvp->nvp_state = 0;
2449	}
2450
2451	/*
2452	 * initialize register by calling chip specific reg initialization
2453	 */
2454	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2455
2456	return (NV_SUCCESS);
2457}
2458
2459
2460/*
2461 * Initialize data structures with enough slots to handle queuing, if
2462 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2463 * NCQ support is built into the driver and enabled.  It might have been
2464 * better to derive the true size from the drive itself, but the sata
2465 * module only sends down that information on the first NCQ command,
2466 * which means possibly re-sizing the structures on an interrupt stack,
2467 * making error handling more messy.  The easy way is to just allocate
2468 * all 32 slots, which is what most drives support anyway.
2469 */
2470static int
2471nv_init_port(nv_port_t *nvp)
2472{
2473	nv_ctl_t *nvc = nvp->nvp_ctlp;
2474	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2475	dev_info_t *dip = nvc->nvc_dip;
2476	ddi_device_acc_attr_t dev_attr;
2477	size_t buf_size;
2478	ddi_dma_cookie_t cookie;
2479	uint_t count;
2480	int rc, i;
2481
2482	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2483	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2484	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2485
2486	if (nvp->nvp_state & NV_PORT_INIT) {
2487		NVLOG((NVDBG_INIT, nvc, nvp,
2488		    "nv_init_port previously initialized"));
2489
2490		return (NV_SUCCESS);
2491	} else {
2492		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2493	}
2494
2495	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2496	    NV_QUEUE_SLOTS, KM_SLEEP);
2497
2498	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2499	    NV_QUEUE_SLOTS, KM_SLEEP);
2500
2501	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2502	    NV_QUEUE_SLOTS, KM_SLEEP);
2503
2504	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2505	    NV_QUEUE_SLOTS, KM_SLEEP);
2506
2507	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2508	    KM_SLEEP);
2509
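	/*
	 * for each slot, allocate a DMA handle and DMA-accessible memory
	 * for the PRD (physical region descriptor) table, and bind it so
	 * the bus master engine can reach it.
	 */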
2510	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2511
2512		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2513		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2514
2515		if (rc != DDI_SUCCESS) {
2516			nv_uninit_port(nvp);
2517
2518			return (NV_FAILURE);
2519		}
2520
2521		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2522		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2523		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2524		    &(nvp->nvp_sg_acc_hdl[i]));
2525
2526		if (rc != DDI_SUCCESS) {
2527			nv_uninit_port(nvp);
2528
2529			return (NV_FAILURE);
2530		}
2531
2532		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2533		    nvp->nvp_sg_addr[i], buf_size,
2534		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2535		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2536
2537		if (rc != DDI_DMA_MAPPED) {
2538			nv_uninit_port(nvp);
2539
2540			return (NV_FAILURE);
2541		}
2542
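		/*
		 * the PRD table is expected to bind as a single physically
		 * contiguous cookie reachable with a 32-bit address.
		 */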
2543		ASSERT(count == 1);
2544		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2545
2546		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2547
2548		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2549	}
2550
2551	/*
2552	 * nvp_queue_depth represents the actual drive queue depth, not the
2553	 * number of slots allocated in the structures (which may be more).
2554	 * Actual queue depth is only learned after the first NCQ command, so
2555	 * initialize it to 1 for now.
2556	 */
2557	nvp->nvp_queue_depth = 1;
2558
2559	nvp->nvp_state |= NV_PORT_INIT;
2560
2561	return (NV_SUCCESS);
2562}
2563
2564
2565/*
2566 * Free dynamically allocated structures for port.
2567 */
2568static void
2569nv_uninit_port(nv_port_t *nvp)
2570{
2571	int i;
2572
2573	/*
2574	 * It is possible to reach here before a port has been initialized or
2575	 * after it has already been uninitialized.  Just return in that case.
2576	 */
2577	if (nvp->nvp_slot == NULL) {
2578
2579		return;
2580	}
2581
2582	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2583	    "nv_uninit_port uninitializing"));
2584
2585	nvp->nvp_type = SATA_DTYPE_NONE;
2586
2587	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2588		if (nvp->nvp_sg_paddr[i]) {
2589			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2590		}
2591
2592		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2593			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2594		}
2595
2596		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2597			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2598		}
2599	}
2600
2601	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2602	nvp->nvp_slot = NULL;
2603
2604	kmem_free(nvp->nvp_sg_dma_hdl,
2605	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2606	nvp->nvp_sg_dma_hdl = NULL;
2607
2608	kmem_free(nvp->nvp_sg_acc_hdl,
2609	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2610	nvp->nvp_sg_acc_hdl = NULL;
2611
2612	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2613	nvp->nvp_sg_addr = NULL;
2614
2615	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2616	nvp->nvp_sg_paddr = NULL;
2617
2618	nvp->nvp_state &= ~NV_PORT_INIT;
2619	nvp->nvp_signature = 0;
2620}
2621
2622
2623/*
2624 * Cache register offsets and access handles to frequently accessed registers
2625 * which are common to either chipset.
2626 */
2627static void
2628nv_common_reg_init(nv_ctl_t *nvc)
2629{
2630	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2631	uchar_t *bm_addr_offset, *sreg_offset;
2632	uint8_t bar, port;
2633	nv_port_t *nvp;
2634
2635	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
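		/*
		 * channel 0 task file registers are in BAR 0/1 and channel 1
		 * in BAR 2/3; both channels share the bus master registers
		 * in BAR 4, with channel 1 offset by 8 bytes.
		 */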
2636		if (port == 0) {
2637			bar = NV_BAR_0;
2638			bm_addr_offset = 0;
2639			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2640		} else {
2641			bar = NV_BAR_2;
2642			bm_addr_offset = (uchar_t *)8;
2643			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2644		}
2645
2646		nvp = &(nvc->nvc_port[port]);
2647		nvp->nvp_ctlp = nvc;
2648		nvp->nvp_port_num = port;
2649		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2650
2651		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2652		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2653		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2654		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2655		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2656		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2657		    (long)bm_addr_offset;
2658
2659		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2660		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2661		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2662		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2663	}
2664}
2665
2666
2667static void
2668nv_uninit_ctl(nv_ctl_t *nvc)
2669{
2670	int port;
2671	nv_port_t *nvp;
2672
2673	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2674
2675	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2676		nvp = &(nvc->nvc_port[port]);
2677		mutex_enter(&nvp->nvp_mutex);
2678		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2679		nv_uninit_port(nvp);
2680		mutex_exit(&nvp->nvp_mutex);
2681		mutex_destroy(&nvp->nvp_mutex);
2682		cv_destroy(&nvp->nvp_poll_cv);
2683	}
2684
2685	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2686	nvc->nvc_port = NULL;
2687}
2688
2689
2690/*
2691 * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2692 * that interrupts from other devices can be disregarded while dtracing.
2693 */
2694/* ARGSUSED */
2695static uint_t
2696mcp04_intr(caddr_t arg1, caddr_t arg2)
2697{
2698	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2699	uint8_t intr_status;
2700	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2701
2702	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2703
2704	if (intr_status == 0) {
2705
2706		return (DDI_INTR_UNCLAIMED);
2707	}
2708
2709	mcp04_intr_process(nvc, intr_status);
2710
2711	return (DDI_INTR_CLAIMED);
2712}
2713
2714
2715/*
2716 * Main interrupt handler for ck804.  Handles normal device
2717 * interrupts as well as port hot plug and remove interrupts.
2718 *
2719 */
2720static void
2721mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2722{
2723
2724	int port, i;
2725	nv_port_t *nvp;
2726	nv_slot_t *nv_slotp;
2727	uchar_t	status;
2728	sata_pkt_t *spkt;
2729	uint8_t bmstatus, clear_bits;
2730	ddi_acc_handle_t bmhdl;
2731	int nvcleared = 0;
2732	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2733	uint32_t sstatus;
2734	int port_mask_hot[] = {
2735		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2736	};
2737	int port_mask_pm[] = {
2738		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2739	};
2740
2741	NVLOG((NVDBG_INTR, nvc, NULL,
2742	    "mcp04_intr_process entered intr_status=%x", intr_status));
2743
2744	/*
2745	 * For command completion interrupts, an explicit clear is not
2746	 * required; however, for the error cases an explicit clear is performed.
2747	 */
2748	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2749
2750		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2751
2752		if ((port_mask[port] & intr_status) == 0) {
2753			continue;
2754		}
2755
2756		NVLOG((NVDBG_INTR, nvc, NULL,
2757		    "mcp04_intr_process interrupt on port %d", port));
2758
2759		nvp = &(nvc->nvc_port[port]);
2760
2761		mutex_enter(&nvp->nvp_mutex);
2762
2763		/*
2764		 * there was a corner case found where an interrupt
2765		 * arrived before nvp_slot was set.  We should
2766		 * probably track down why that happens and try
2767		 * to eliminate that source and then get rid of this
2768		 * check.
2769		 */
2770		if (nvp->nvp_slot == NULL) {
2771			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2772			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2773			    "received before initialization "
2774			    "completed status=%x", status));
2775			mutex_exit(&nvp->nvp_mutex);
2776
2777			/*
2778			 * clear interrupt bits
2779			 */
2780			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2781			    port_mask[port]);
2782
2783			continue;
2784		}
2785
2786		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2787			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2788			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2789			    " no command in progress status=%x", status));
2790			mutex_exit(&nvp->nvp_mutex);
2791
2792			/*
2793			 * clear interrupt bits
2794			 */
2795			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2796			    port_mask[port]);
2797
2798			continue;
2799		}
2800
2801		bmhdl = nvp->nvp_bm_hdl;
2802		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2803
2804		if (!(bmstatus & BMISX_IDEINTS)) {
2805			mutex_exit(&nvp->nvp_mutex);
2806
2807			continue;
2808		}
2809
2810		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2811
2812		if (status & SATA_STATUS_BSY) {
2813			mutex_exit(&nvp->nvp_mutex);
2814
2815			continue;
2816		}
2817
2818		nv_slotp = &(nvp->nvp_slot[0]);
2819
2820		ASSERT(nv_slotp);
2821
2822		spkt = nv_slotp->nvslot_spkt;
2823
2824		if (spkt == NULL) {
2825			mutex_exit(&nvp->nvp_mutex);
2826
2827			continue;
2828		}
2829
2830		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2831
2832		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2833
2834		/*
2835		 * If there is no link, we cannot be certain about the
2836		 * completion of the packet, so abort it.
2837		 */
2838		if (nv_check_link((&spkt->satapkt_device)->
2839		    satadev_scr.sstatus) == B_FALSE) {
2840
2841			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2842
2843		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2844
2845			nv_complete_io(nvp, spkt, 0);
2846		}
2847
2848		mutex_exit(&nvp->nvp_mutex);
2849	}
2850
2851	/*
2852	 * mcp04 often doesn't correctly distinguish hot add/remove
2853	 * interrupts.  Frequently both the ADD and the REMOVE bits
2854	 * are asserted, whether it was a remove or add.  Use sstatus
2855	 * to distinguish hot add from hot remove.
2856	 */
2857
2858	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2859		clear_bits = 0;
2860
2861		nvp = &(nvc->nvc_port[port]);
2862		mutex_enter(&nvp->nvp_mutex);
2863
2864		if ((port_mask_pm[port] & intr_status) != 0) {
2865			clear_bits = port_mask_pm[port];
2866			NVLOG((NVDBG_HOT, nvc, nvp,
2867			    "clearing PM interrupt bit: %x",
2868			    intr_status & port_mask_pm[port]));
2869		}
2870
2871		if ((port_mask_hot[port] & intr_status) == 0) {
2872			if (clear_bits != 0) {
2873				goto clear;
2874			} else {
2875				mutex_exit(&nvp->nvp_mutex);
2876				continue;
2877			}
2878		}
2879
2880		/*
2881		 * reaching here means there was a hot add or remove.
2882		 */
2883		clear_bits |= port_mask_hot[port];
2884
2885		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2886
2887		sstatus = nv_get32(bar5_hdl,
2888		    nvc->nvc_port[port].nvp_sstatus);
2889
2890		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2891		    SSTATUS_DET_DEVPRE_PHYCOM) {
2892			nv_report_add_remove(nvp, 0);
2893		} else {
2894			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2895		}
2896	clear:
2897		/*
2898		 * clear interrupt bits.  explicit interrupt clear is
2899		 * required for hotplug interrupts.
2900		 */
2901		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2902
2903		/*
2904		 * make sure it's flushed and cleared.  If not try
2905		 * again.  Sometimes it has been observed to not clear
2906		 * on the first try.
2907		 */
2908		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2909
2910		/*
2911		 * make 10 additional attempts to clear the interrupt
2912		 */
2913		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2914			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2915			    "still not clear try=%d", intr_status,
2916			    ++nvcleared));
2917			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2918			    clear_bits);
2919			intr_status = nv_get8(bar5_hdl,
2920			    nvc->nvc_mcp04_int_status);
2921		}
2922
2923		/*
2924		 * if still not clear, log a message and disable the
2925		 * port.  It is highly unlikely that this path is taken, but it
2926		 * gives protection against a wedged interrupt.
2927		 */
2928		if (intr_status & clear_bits) {
2929			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2930			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2931			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2932			nvp->nvp_state |= NV_PORT_FAILED;
2933			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2934			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2935			    "interrupt.  disabling port intr_status=%X",
2936			    intr_status);
2937		}
2938
2939		mutex_exit(&nvp->nvp_mutex);
2940	}
2941}
2942
2943
2944/*
2945 * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2946 * on the controller, to handle completion and hot plug and remove events.
2947 *
2948 */
2949static uint_t
2950mcp55_intr_port(nv_port_t *nvp)
2951{
2952	nv_ctl_t *nvc = nvp->nvp_ctlp;
2953	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2954	uint8_t clear = 0, intr_cycles = 0;
2955	int ret = DDI_INTR_UNCLAIMED;
2956	uint16_t int_status;
2957
2958	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2959
2960	for (;;) {
2961		/*
2962		 * read current interrupt status
2963		 */
2964		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2965
2966		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2967
2968		/*
2969		 * MCP55_INT_IGNORE interrupts will show up in the status,
2970		 * but are masked out from causing an interrupt to be generated
2971		 * to the processor.  Ignore them here by masking them out.
2972		 */
2973		int_status &= ~(MCP55_INT_IGNORE);
2974
2975		/*
2976		 * exit the loop when no more interrupts to process
2977		 */
2978		if (int_status == 0) {
2979
2980			break;
2981		}
2982
2983		if (int_status & MCP55_INT_COMPLETE) {
2984			NVLOG((NVDBG_INTR, nvc, nvp,
2985			    "mcp55_packet_complete_intr"));
2986			/*
2987			 * since int_status was set, return DDI_INTR_CLAIMED
2988			 * from the DDI's perspective even though the packet
2989			 * completion may not have succeeded.  If it fails,
2990			 * need to manually clear the interrupt, otherwise
2991			 * clearing is implicit.
2992			 */
2993			ret = DDI_INTR_CLAIMED;
2994			if (mcp55_packet_complete_intr(nvc, nvp) ==
2995			    NV_FAILURE) {
2996				clear = MCP55_INT_COMPLETE;
2997			} else {
2998				intr_cycles = 0;
2999			}
3000		}
3001
3002		if (int_status & MCP55_INT_DMA_SETUP) {
3003			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
3004
3005			/*
3006			 * Needs to be cleared before starting the BM, so do it
3007			 * now.  make sure this is still working.
3008			 */
3009			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
3010			    MCP55_INT_DMA_SETUP);
3011#ifdef NCQ
3012			ret = mcp55_dma_setup_intr(nvc, nvp);
3013#endif
3014		}
3015
3016		if (int_status & MCP55_INT_REM) {
3017			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
3018			clear = MCP55_INT_REM;
3019			ret = DDI_INTR_CLAIMED;
3020
3021			mutex_enter(&nvp->nvp_mutex);
3022			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3023			mutex_exit(&nvp->nvp_mutex);
3024
3025		} else if (int_status & MCP55_INT_ADD) {
3026			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
3027			clear = MCP55_INT_ADD;
3028			ret = DDI_INTR_CLAIMED;
3029
3030			mutex_enter(&nvp->nvp_mutex);
3031			nv_report_add_remove(nvp, 0);
3032			mutex_exit(&nvp->nvp_mutex);
3033		}
3034
3035		if (clear) {
3036			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
3037			clear = 0;
3038		}
3039
3040		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3041			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3042			    "processing.  Disabling port int_status=%X"
3043			    " clear=%X", int_status, clear);
3044			mutex_enter(&nvp->nvp_mutex);
3045			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3046			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3047			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3048			nvp->nvp_state |= NV_PORT_FAILED;
3049			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3050			mutex_exit(&nvp->nvp_mutex);
3051		}
3052	}
3053
3054	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
3055
3056	return (ret);
3057}
3058
3059
3060/* ARGSUSED */
3061static uint_t
3062mcp55_intr(caddr_t arg1, caddr_t arg2)
3063{
3064	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3065	int ret;
3066
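	/*
	 * check both ports; the interrupt is claimed if either port's
	 * handler claimed it.
	 */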
3067	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
3068	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
3069
3070	return (ret);
3071}
3072
3073
3074#ifdef NCQ
3075/*
3076 * with software driven NCQ on mcp55, an interrupt occurs right
3077 * before the drive is ready to do a DMA transfer.  At this point,
3078 * the PRD table needs to be programmed and the DMA engine enabled
3079 * and ready to go.
3080 *
3081 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3082 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3083 * -- clear bit 0 of master command reg
3084 * -- program PRD
3085 * -- clear the interrupt status bit for the DMA Setup FIS
3086 * -- set bit 0 of the bus master command register
3087 */
3088static int
3089mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3090{
3091	int slot;
3092	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3093	uint8_t bmicx;
3094	int port = nvp->nvp_port_num;
3095	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3096	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3097
3098	nv_cmn_err(CE_PANIC, nvc, nvp,
3099	    "this should not be executed at all until NCQ");
3100
3101	mutex_enter(&nvp->nvp_mutex);
3102
3103	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
3104
3105	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3106
3107	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
3108	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3109
3110	/*
3111	 * halt the DMA engine.  This step is necessary according to
3112	 * the mcp55 spec, probably since there may have been a "first" packet
3113	 * that already programmed the DMA engine, but may not turn out to
3114	 * be the first one processed.
3115	 */
3116	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3117
3118#if 0
3119	if (bmicx & BMICX_SSBM) {
3120		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3121		    "another packet.  Cancelling and reprogramming"));
3122		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3123	}
3124#endif
3125	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3126
3127	nv_start_dma_engine(nvp, slot);
3128
3129	mutex_exit(&nvp->nvp_mutex);
3130
3131	return (DDI_INTR_CLAIMED);
3132}
3133#endif /* NCQ */
3134
3135
3136/*
3137 * packet completion interrupt.  If the packet is complete, invoke
3138 * the packet completion callback.
3139 */
3140static int
3141mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3142{
3143	uint8_t status, bmstatus;
3144	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3145	int sactive;
3146	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3147	sata_pkt_t *spkt;
3148	nv_slot_t *nv_slotp;
3149
3150	mutex_enter(&nvp->nvp_mutex);
3151
3152	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3153
3154	if (!(bmstatus & BMISX_IDEINTS)) {
3155		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3156		mutex_exit(&nvp->nvp_mutex);
3157
3158		return (NV_FAILURE);
3159	}
3160
3161	/*
3162	 * If the item that just completed is a non-NCQ command, the busy
3163	 * bit should not be set.
3164	 */
3165	if (nvp->nvp_non_ncq_run) {
3166		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3167		if (status & SATA_STATUS_BSY) {
3168			nv_cmn_err(CE_WARN, nvc, nvp,
3169			    "unexpected SATA_STATUS_BSY set");
3170			mutex_exit(&nvp->nvp_mutex);
3171			/*
3172			 * calling function will clear interrupt.  then
3173			 * the real interrupt will either arrive or the
3174			 * packet timeout handling will take over and
3175			 * reset.
3176			 */
3177			return (NV_FAILURE);
3178		}
3179
3180	} else {
3181		/*
3182		 * NCQ: check for BSY here and wait if still busy before
3183		 * continuing.  Rather than waiting for it to clear when
3184		 * starting a packet and wasting CPU time, the starting
3185		 * thread can exit immediately, but might have to spin here
3186		 * for a while.  Needs more work and experimentation.
3187		 */
3188		ASSERT(nvp->nvp_ncq_run);
3189	}
3190
3191
3192	if (nvp->nvp_ncq_run) {
3193		ncq_command = B_TRUE;
3194		ASSERT(nvp->nvp_non_ncq_run == 0);
3195	} else {
3196		ASSERT(nvp->nvp_non_ncq_run != 0);
3197	}
3198
3199	/*
3200	 * active_pkt_bit will represent the bitmap of the single completed
3201	 * packet.  Because of the nature of sw assisted NCQ, only one
3202	 * command will complete per interrupt.
3203	 */
3204
3205	if (ncq_command == B_FALSE) {
3206		active_pkt = 0;
3207	} else {
3208		/*
3209		 * NCQ: determine which command just completed, by examining
3210		 * which bit cleared in the register since last written.
3211		 */
3212		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3213
3214		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3215
3216		ASSERT(active_pkt_bit);
3217
3218
3219		/*
3220		 * this failure path needs more work to handle the
3221		 * error condition and recovery.
3222		 */
3223		if (active_pkt_bit == 0) {
3224			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3225
3226			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3227			    "nvp->nvp_sactive %X", sactive,
3228			    nvp->nvp_sactive_cache);
3229
3230			(void) nv_get8(cmdhdl, nvp->nvp_status);
3231
3232			mutex_exit(&nvp->nvp_mutex);
3233
3234			return (NV_FAILURE);
3235		}
3236
3237		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
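		/*
		 * convert the single set bit into its slot index by
		 * shifting until the bit reaches position 0.
		 */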
3238		    active_pkt++, active_pkt_bit >>= 1) {
3239		}
3240
3241		/*
3242		 * make sure only one bit is ever turned on
3243		 */
3244		ASSERT(active_pkt_bit == 1);
3245
3246		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3247	}
3248
3249	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3250
3251	spkt = nv_slotp->nvslot_spkt;
3252
3253	ASSERT(spkt != NULL);
3254
3255	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3256
3257	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3258
3259	/*
3260	 * If there is no link, we cannot be certain about the
3261	 * completion of the packet, so abort it.
3262	 */
3263	if (nv_check_link((&spkt->satapkt_device)->
3264	    satadev_scr.sstatus) == B_FALSE) {
3265		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3266
3267	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3268
3269		nv_complete_io(nvp, spkt, active_pkt);
3270	}
3271
3272	mutex_exit(&nvp->nvp_mutex);
3273
3274	return (NV_SUCCESS);
3275}
3276
3277
3278static void
3279nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3280{
3281
3282	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3283
3284	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3285		nvp->nvp_ncq_run--;
3286	} else {
3287		nvp->nvp_non_ncq_run--;
3288	}
3289
3290	/*
3291	 * mark the packet slot idle so it can be reused.  Do this before
3292	 * calling satapkt_comp so the slot is available right away.
3293	 */
3294	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3295
3296	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3297		/*
3298		 * If this is not a timed polled mode cmd, which has an
3299		 * active thread monitoring for completion, then we need
3300		 * to signal the sleeping thread that the cmd is complete.
3301		 */
3302		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3303			cv_signal(&nvp->nvp_poll_cv);
3304		}
3305
3306		return;
3307	}
3308
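	/*
	 * invoke the completion callback with the port mutex dropped,
	 * presumably so the callback can re-enter the driver safely.
	 */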
3309	if (spkt->satapkt_comp != NULL) {
3310		mutex_exit(&nvp->nvp_mutex);
3311		(*spkt->satapkt_comp)(spkt);
3312		mutex_enter(&nvp->nvp_mutex);
3313	}
3314}
3315
3316
3317/*
3318 * check whether the packet is an ncq command or not.  for an ncq
3319 * command, start it if there is still room on the queue.  for a
3320 * non-ncq command, only start it if no other command is running.
3321 */
3322static int
3323nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3324{
3325	uint8_t cmd, ncq;
3326
3327	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3328
3329	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3330
3331	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3332	    (cmd == SATAC_READ_FPDMA_QUEUED));
3333
3334	if (ncq == B_FALSE) {
3335
3336		if ((nvp->nvp_non_ncq_run == 1) ||
3337		    (nvp->nvp_ncq_run > 0)) {
3338			/*
3339			 * next command is non-ncq which can't run
3340			 * concurrently.  exit and return queue full.
3341			 */
3342			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3343
3344			return (SATA_TRAN_QUEUE_FULL);
3345		}
3346
3347		return (nv_start_common(nvp, spkt));
3348	}
3349
3350	/*
3351	 * ncq == B_TRUE
3352	 */
3353	if (nvp->nvp_non_ncq_run == 1) {
3354		/*
3355		 * cannot start any NCQ commands when there
3356		 * is a non-NCQ command running.
3357		 */
3358		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3359
3360		return (SATA_TRAN_QUEUE_FULL);
3361	}
3362
3363#ifdef NCQ
3364	/*
3365	 * this is not compiled for now as satapkt_device.satadev_qdepth
3366	 * is being pulled out until NCQ support is later addressed
3367	 *
3368	 * nvp_queue_depth is initialized by the first NCQ command
3369	 * received.
3370	 */
3371	if (nvp->nvp_queue_depth == 1) {
3372		nvp->nvp_queue_depth =
3373		    spkt->satapkt_device.satadev_qdepth;
3374
3375		ASSERT(nvp->nvp_queue_depth > 1);
3376
3377		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3378		    "nv_process_queue: nvp_queue_depth set to %d",
3379		    nvp->nvp_queue_depth));
3380	}
3381#endif
3382
3383	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3384		/*
3385		 * max number of NCQ commands already active
3386		 */
3387		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3388
3389		return (SATA_TRAN_QUEUE_FULL);
3390	}
3391
3392	return (nv_start_common(nvp, spkt));
3393}
3394
3395
3396/*
3397 * configure INTx and legacy interrupts
3398 */
3399static int
3400nv_add_legacy_intrs(nv_ctl_t *nvc)
3401{
3402	dev_info_t	*devinfo = nvc->nvc_dip;
3403	int		actual, count = 0;
3404	int		x, y, rc, inum = 0;
3405
3406	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3407
3408	/*
3409	 * get number of interrupts
3410	 */
3411	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3412	if ((rc != DDI_SUCCESS) || (count == 0)) {
3413		NVLOG((NVDBG_INTR, nvc, NULL,
3414		    "ddi_intr_get_nintrs() failed, "
3415		    "rc %d count %d", rc, count));
3416
3417		return (DDI_FAILURE);
3418	}
3419
3420	/*
3421	 * allocate an array of interrupt handles
3422	 */
3423	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3424	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3425
3426	/*
3427	 * call ddi_intr_alloc()
3428	 */
3429	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3430	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3431
3432	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3433		nv_cmn_err(CE_WARN, nvc, NULL,
3434		    "ddi_intr_alloc() failed, rc %d", rc);
3435		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3436
3437		return (DDI_FAILURE);
3438	}
3439
3440	if (actual < count) {
3441		nv_cmn_err(CE_WARN, nvc, NULL,
3442		    "ddi_intr_alloc: requested: %d, received: %d",
3443		    count, actual);
3444
3445		goto failure;
3446	}
3447
3448	nvc->nvc_intr_cnt = actual;
3449
3450	/*
3451	 * get intr priority
3452	 */
3453	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3454	    DDI_SUCCESS) {
3455		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3456
3457		goto failure;
3458	}
3459
3460	/*
3461	 * Test for high level mutex
3462	 */
3463	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3464		nv_cmn_err(CE_WARN, nvc, NULL,
3465		    "nv_add_legacy_intrs: high level intr not supported");
3466
3467		goto failure;
3468	}
3469
3470	for (x = 0; x < actual; x++) {
3471		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3472		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3473			nv_cmn_err(CE_WARN, nvc, NULL,
3474			    "ddi_intr_add_handler() failed");
3475
3476			goto failure;
3477		}
3478	}
3479
3480	/*
3481	 * call ddi_intr_enable() for legacy interrupts
3482	 */
3483	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3484		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3485	}
3486
3487	return (DDI_SUCCESS);
3488
3489	failure:
3490	/*
3491	 * free allocated intr and nvc_htable
3492	 */
3493	for (y = 0; y < actual; y++) {
3494		(void) ddi_intr_free(nvc->nvc_htable[y]);
3495	}
3496
3497	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3498
3499	return (DDI_FAILURE);
3500}
3501
3502#ifdef	NV_MSI_SUPPORTED
3503/*
3504 * configure MSI interrupts
3505 */
3506static int
3507nv_add_msi_intrs(nv_ctl_t *nvc)
3508{
3509	dev_info_t	*devinfo = nvc->nvc_dip;
3510	int		count, avail, actual;
3511	int		x, y, rc, inum = 0;
3512
3513	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3514
3515	/*
3516	 * get number of interrupts
3517	 */
3518	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3519	if ((rc != DDI_SUCCESS) || (count == 0)) {
3520		nv_cmn_err(CE_WARN, nvc, NULL,
3521		    "ddi_intr_get_nintrs() failed, "
3522		    "rc %d count %d", rc, count);
3523
3524		return (DDI_FAILURE);
3525	}
3526
3527	/*
3528	 * get number of available interrupts
3529	 */
3530	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3531	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3532		nv_cmn_err(CE_WARN, nvc, NULL,
3533		    "ddi_intr_get_navail() failed, "
3534		    "rc %d avail %d", rc, avail);
3535
3536		return (DDI_FAILURE);
3537	}
3538
3539	if (avail < count) {
3540		nv_cmn_err(CE_WARN, nvc, NULL,
3541		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3542		    avail, count);
3543	}
3544
3545	/*
3546	 * allocate an array of interrupt handles
3547	 */
3548	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3549	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3550
3551	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3552	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3553
3554	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3555		nv_cmn_err(CE_WARN, nvc, NULL,
3556		    "ddi_intr_alloc() failed, rc %d", rc);
3557		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3558
3559		return (DDI_FAILURE);
3560	}
3561
3562	/*
3563	 * Use interrupt count returned or abort?
3564	 */
3565	if (actual < count) {
3566		NVLOG((NVDBG_INIT, nvc, NULL,
3567		    "Requested: %d, Received: %d", count, actual));
3568	}
3569
3570	nvc->nvc_intr_cnt = actual;
3571
3572	/*
3573	 * get priority for first msi, assume remaining are all the same
3574	 */
3575	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3576	    DDI_SUCCESS) {
3577		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3578
3579		goto failure;
3580	}
3581
3582	/*
3583	 * test for high level mutex
3584	 */
3585	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3586		nv_cmn_err(CE_WARN, nvc, NULL,
3587		    "nv_add_msi_intrs: high level intr not supported");
3588
3589		goto failure;
3590	}
3591
3592	/*
3593	 * Call ddi_intr_add_handler()
3594	 */
3595	for (x = 0; x < actual; x++) {
3596		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3597		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3598			nv_cmn_err(CE_WARN, nvc, NULL,
3599			    "ddi_intr_add_handler() failed");
3600
3601			goto failure;
3602		}
3603	}
3604
3605	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3606
3607	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3608		(void) ddi_intr_block_enable(nvc->nvc_htable,
3609		    nvc->nvc_intr_cnt);
3610	} else {
3611		/*
3612		 * Call ddi_intr_enable() for MSI non block enable
3613		 */
3614		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3615			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3616		}
3617	}
3618
3619	return (DDI_SUCCESS);
3620
3621	failure:
3622	/*
3623	 * free allocated intr and nvc_htable
3624	 */
3625	for (y = 0; y < actual; y++) {
3626		(void) ddi_intr_free(nvc->nvc_htable[y]);
3627	}
3628
3629	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3630
3631	return (DDI_FAILURE);
3632}
3633#endif
3634
3635
3636static void
3637nv_rem_intrs(nv_ctl_t *nvc)
3638{
3639	int x, i;
3640	nv_port_t *nvp;
3641
3642	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3643
3644	/*
3645	 * prevent controller from generating interrupts by
3646	 * masking them out.  This is an extra precaution.
3647	 */
3648	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3649		nvp = (&nvc->nvc_port[i]);
3650		mutex_enter(&nvp->nvp_mutex);
3651		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3652		mutex_exit(&nvp->nvp_mutex);
3653	}
3654
3655	/*
3656	 * disable all interrupts
3657	 */
3658	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3659	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3660		(void) ddi_intr_block_disable(nvc->nvc_htable,
3661		    nvc->nvc_intr_cnt);
3662	} else {
3663		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3664			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3665		}
3666	}
3667
3668	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3669		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3670		(void) ddi_intr_free(nvc->nvc_htable[x]);
3671	}
3672
3673	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3674}
3675
3676
3677/*
3678 * variable argument wrapper for cmn_err.  prefixes the instance and port
3679 * number if possible
3680 */
3681static void
3682nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3683{
3684	char port[NV_STRING_10];
3685	char inst[NV_STRING_10];
3686
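	/*
	 * nv_log_buf is a single shared buffer, so serialize access to it
	 * with nv_log_mutex.
	 */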
3687	mutex_enter(&nv_log_mutex);
3688
3689	if (nvc) {
3690		(void) snprintf(inst, NV_STRING_10, "inst %d",
3691		    ddi_get_instance(nvc->nvc_dip));
3692	} else {
3693		inst[0] = '\0';
3694	}
3695
3696	if (nvp) {
3697		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3698	} else {
3699		port[0] = '\0';
3700	}
3701
3702	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3703	    (inst[0]|port[0] ? ": " :""));
3704
3705	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3706	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3707
3708	/*
3709	 * normally set to log to console but in some debug situations it
3710	 * may be useful to log only to a file.
3711	 */
3712	if (nv_log_to_console) {
3713		if (nv_prom_print) {
3714			prom_printf("%s\n", nv_log_buf);
3715		} else {
3716			cmn_err(ce, "%s", nv_log_buf);
3717		}
3718
3719
3720	} else {
3721		cmn_err(ce, "!%s", nv_log_buf);
3722	}
3723
3724	mutex_exit(&nv_log_mutex);
3725}
3726
3727
3728/*
3729 * wrapper for cmn_err
3730 */
3731static void
3732nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3733{
3734	va_list ap;
3735
3736	va_start(ap, fmt);
3737	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3738	va_end(ap);
3739}
3740
3741
3742#if defined(DEBUG)
3743/*
3744 * prefixes the instance and port number if possible to the debug message
3745 */
3746static void
3747nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3748{
3749	va_list ap;
3750
3751	if ((nv_debug_flags & flag) == 0) {
3752		return;
3753	}
3754
3755	va_start(ap, fmt);
3756	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3757	va_end(ap);
3758
3759	/*
3760	 * useful for some debugging situations
3761	 */
3762	if (nv_log_delay) {
3763		drv_usecwait(nv_log_delay);
3764	}
3765
3766}
3767#endif /* DEBUG */
3768
3769
3770/*
3771 * program registers which are common to all commands
3772 */
3773static void
3774nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3775{
3776	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3777	sata_pkt_t *spkt;
3778	sata_cmd_t *satacmd;
3779	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3780	uint8_t cmd, ncq = B_FALSE;
3781
3782	spkt = nv_slotp->nvslot_spkt;
3783	satacmd = &spkt->satapkt_cmd;
3784	cmd = satacmd->satacmd_cmd_reg;
3785
3786	ASSERT(nvp->nvp_slot);
3787
3788	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3789	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3790		ncq = B_TRUE;
3791	}
3792
3793	/*
3794	 * select the drive
3795	 */
3796	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3797
3798	/*
3799	 * make certain the drive is selected
3800	 */
3801	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3802	    NV_SEC2USEC(5), 0) == B_FALSE) {
3803
3804		return;
3805	}
3806
3807	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3808
3809	case ATA_ADDR_LBA:
3810		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3811
3812		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3813		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3814		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3815		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3816
3817		break;
3818
3819	case ATA_ADDR_LBA28:
3820		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3821		    "ATA_ADDR_LBA28 mode"));
3822		/*
3823		 * NCQ only uses 48-bit addressing
3824		 */
3825		ASSERT(ncq != B_TRUE);
3826
3827		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3828		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3829		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3830		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3831
3832		break;
3833
3834	case ATA_ADDR_LBA48:
3835		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3836		    "ATA_ADDR_LBA48 mode"));
3837
3838		/*
3839		 * for NCQ, tag goes into count register and real sector count
3840		 * into features register.  The sata module does the translation
3841		 * in the satacmd.
3842		 */
3843		if (ncq == B_TRUE) {
3844			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3845			nv_put8(cmdhdl, nvp->nvp_feature,
3846			    satacmd->satacmd_features_reg_ext);
3847			nv_put8(cmdhdl, nvp->nvp_feature,
3848			    satacmd->satacmd_features_reg);
3849		} else {
3850			nv_put8(cmdhdl, nvp->nvp_count,
3851			    satacmd->satacmd_sec_count_msb);
3852			nv_put8(cmdhdl, nvp->nvp_count,
3853			    satacmd->satacmd_sec_count_lsb);
3854		}
3855
3856		/*
3857		 * send the high-order half first
3858		 */
3859		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3860		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3861		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3862		/*
3863		 * Send the low-order half
3864		 */
3865		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3866		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3867		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3868
3869		break;
3870
3871	case 0:
3872		/*
3873		 * non-media access commands such as identify and features
3874		 * take this path.
3875		 */
3876		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3877		nv_put8(cmdhdl, nvp->nvp_feature,
3878		    satacmd->satacmd_features_reg);
3879		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3880		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3881		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3882
3883		break;
3884
3885	default:
3886		break;
3887	}
3888
3889	ASSERT(nvp->nvp_slot);
3890}
3891
3892
3893/*
3894 * start a command that involves no media access
3895 */
3896static int
3897nv_start_nodata(nv_port_t *nvp, int slot)
3898{
3899	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3900	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3901	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3902	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3903
3904	nv_program_taskfile_regs(nvp, slot);
3905
3906	/*
3907	 * This next one sets the controller in motion
3908	 */
3909	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3910
3911	return (SATA_TRAN_ACCEPTED);
3912}
3913
3914
3915int
3916nv_bm_status_clear(nv_port_t *nvp)
3917{
3918	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3919	uchar_t	status, ret;
3920
3921	/*
3922	 * Get the current BM status
3923	 */
3924	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3925
3926	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3927
3928	/*
3929	 * Clear the latches (and preserve the other bits)
3930	 */
3931	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3932
3933	return (ret);
3934}
3935
3936
3937/*
3938 * program the bus master DMA engine with the PRD address for
3939 * the active slot command, and start the DMA engine.
3940 */
3941static void
3942nv_start_dma_engine(nv_port_t *nvp, int slot)
3943{
3944	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3945	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3946	uchar_t direction;
3947
3948	ASSERT(nv_slotp->nvslot_spkt != NULL);
3949
3950	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3951	    == SATA_DIR_READ) {
3952		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3953	} else {
3954		direction = BMICX_RWCON_READ_FROM_MEMORY;
3955	}
3956
3957	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3958	    "nv_start_dma_engine entered"));
3959
3960	/*
3961	 * reset the controller's interrupt and error status bits
3962	 */
3963	(void) nv_bm_status_clear(nvp);
3964
3965	/*
3966	 * program the PRD table physical start address
3967	 */
3968	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3969
3970	/*
3971	 * set the direction control and start the DMA controller
3972	 */
3973	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3974}
3975
3976/*
3977 * start dma command, either in or out
3978 */
3979static int
3980nv_start_dma(nv_port_t *nvp, int slot)
3981{
3982	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3983	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3984	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3985	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3986	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3987#ifdef NCQ
3988	uint8_t ncq = B_FALSE;
3989#endif
3990	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3991	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3992	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3993	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3994
3995	ASSERT(sg_count != 0);
3996
3997	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3998		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3999		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4000		    sata_cmdp->satacmd_num_dma_cookies);
4001
4002		return (NV_FAILURE);
4003	}
4004
4005	nv_program_taskfile_regs(nvp, slot);
4006
4007	/*
4008	 * start the drive in motion
4009	 */
4010	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4011
4012	/*
4013	 * the drive starts processing the transaction when the cmd register
4014	 * is written.  This is done here before programming the DMA engine to
4015	 * parallelize and save some time.  In the event that the drive is ready
4016	 * before DMA, it will wait.
4017	 */
4018#ifdef NCQ
4019	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4020	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4021		ncq = B_TRUE;
4022	}
4023#endif
4024
4025	/*
4026	 * copy the PRD list to PRD table in DMA accessible memory
4027	 * so that the controller can access it.
4028	 */
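	/*
	 * Each PRD entry is a pair of 32-bit words: the physical base
	 * address of the segment followed by a word holding the byte count
	 * and, for the last entry, the end-of-table flag.
	 */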
4029	for (idx = 0; idx < sg_count; idx++, srcp++) {
4030		uint32_t size;
4031
4032		ASSERT(srcp->dmac_size <= UINT16_MAX);
4033
4034		nv_put32(sghdl, dstp++, srcp->dmac_address);
4035
4036		size = srcp->dmac_size;
4037
4038		/*
4039		 * If this is a 40-bit address, copy bits 39:32 of the
4040		 * physical address into bits 23:16 of the PRD count field.
4041		 */
4042		if (srcp->dmac_laddress > UINT32_MAX) {
4043			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4044		}
4045
4046		/*
4047		 * set the end of table flag for the last entry
4048		 */
4049		if (idx == (sg_count - 1)) {
4050			size |= PRDE_EOT;
4051		}
4052
4053		nv_put32(sghdl, dstp++, size);
4054	}
4055
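	/*
	 * Sync the PRD table for the device so that the controller sees
	 * the updated entries before the DMA engine is started.
	 */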
4056	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4057	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4058
4059	nv_start_dma_engine(nvp, slot);
4060
4061#ifdef NCQ
4062	/*
4063	 * optimization:  for SWNCQ, start DMA engine if this is the only
4064	 * command running.  Preliminary NCQ efforts indicated this needs
4065	 * more debugging.
4066	 *
4067	 * if (nvp->nvp_ncq_run <= 1)
4068	 */
4069
4070	if (ncq == B_FALSE) {
4071		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4072		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4073		    " cmd = %X", non_ncq_commands++, cmd));
4074		nv_start_dma_engine(nvp, slot);
4075	} else {
4076		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4077		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4078	}
4079#endif /* NCQ */
4080
4081	return (SATA_TRAN_ACCEPTED);
4082}
4083
4084
4085/*
4086 * start a PIO data-in ATA command
4087 */
4088static int
4089nv_start_pio_in(nv_port_t *nvp, int slot)
4090{
4091
4092	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4093	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4094	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4095
4096	nv_program_taskfile_regs(nvp, slot);
4097
4098	/*
4099	 * This next one sets the drive in motion
4100	 */
4101	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4102
4103	return (SATA_TRAN_ACCEPTED);
4104}
4105
4106
4107/*
4108 * start a PIO data-out ATA command
4109 */
4110static int
4111nv_start_pio_out(nv_port_t *nvp, int slot)
4112{
4113	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4114	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4115	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4116
4117	nv_program_taskfile_regs(nvp, slot);
4118
4119	/*
4120	 * this next one sets the drive in motion
4121	 */
4122	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4123
4124	/*
4125	 * wait for the busy bit to settle
4126	 */
4127	NV_DELAY_NSEC(400);
4128
4129	/*
4130	 * wait for the drive to assert DRQ to send the first chunk
4131	 * of data. Have to busy wait because there's no interrupt for
4132	 * the first chunk. This is bad... uses a lot of cycles if the
4133	 * drive responds too slowly or if the wait loop granularity
4134	 * is too large. It's even worse if the drive is defective and
4135	 * the loop times out.
4136	 */
4137	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4138	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4139	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4140	    4000000, 0) == B_FALSE) {
4141		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4142
4143		goto error;
4144	}
4145
4146	/*
4147	 * send the first block.
4148	 */
4149	nv_intr_pio_out(nvp, nv_slotp);
4150
4151	/*
4152	 * If nvslot_flags is not set to COMPLETE yet, then processing
4153	 * is OK so far, so return.  Otherwise, fall into error handling
4154	 * below.
4155	 */
4156	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4157
4158		return (SATA_TRAN_ACCEPTED);
4159	}
4160
4161	error:
4162	/*
4163	 * there was an error so reset the device and complete the packet.
4164	 */
4165	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4166	nv_complete_io(nvp, spkt, 0);
4167	nv_reset(nvp);
4168
4169	return (SATA_TRAN_PORT_ERROR);
4170}
4171
4172
4173/*
4174 * start a ATAPI Packet command (PIO data in or out)
4175 */
4176static int
4177nv_start_pkt_pio(nv_port_t *nvp, int slot)
4178{
4179	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4180	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4181	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4182	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4183
4184	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4185	    "nv_start_pkt_pio: start"));
4186
4187	/*
4188	 * Write the PACKET command to the command register.  Normally
4189	 * this would be done through nv_program_taskfile_regs().  It
4190	 * is done here because some values need to be overridden.
4191	 */
4192
4193	/* select the drive */
4194	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4195
4196	/* make certain the drive selected */
4197	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4198	    NV_SEC2USEC(5), 0) == B_FALSE) {
4199		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4200		    "nv_start_pkt_pio: drive select failed"));
4201		return (SATA_TRAN_PORT_ERROR);
4202	}
4203
4204	/*
4205	 * The command is always sent via PIO, regardless of what the SATA
4206	 * framework sets in the command.  Overwrite the DMA bit to do this.
4207	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4208	 */
4209	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4210
4211	/* set appropriately by the sata framework */
4212	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4213	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4214	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4215	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4216
4217	/* initiate the command by writing the command register last */
4218	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4219
4220	/* Give the host controller time to do its thing */
4221	NV_DELAY_NSEC(400);
4222
4223	/*
4224	 * Wait for the device to indicate that it is ready for the command
4225	 * ATAPI protocol state - HP0: Check_Status_A
4226	 */
4227
4228	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4229	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4230	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4231	    4000000, 0) == B_FALSE) {
4232		/*
4233		 * Either an error or device fault occurred or the wait
4234		 * timed out.  According to the ATAPI protocol, command
4235		 * completion is also possible.  Other implementations of
4236		 * this protocol don't handle this last case, so neither
4237		 * does this code.
4238		 */
4239
4240		if (nv_get8(cmdhdl, nvp->nvp_status) &
4241		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4242			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4243
4244			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4245			    "nv_start_pkt_pio: device error (HP0)"));
4246		} else {
4247			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4248
4249			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4250			    "nv_start_pkt_pio: timeout (HP0)"));
4251		}
4252
4253		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4254		nv_complete_io(nvp, spkt, 0);
4255		nv_reset(nvp);
4256
4257		return (SATA_TRAN_PORT_ERROR);
4258	}
4259
4260	/*
4261	 * Put the ATAPI command in the data register
4262	 * ATAPI protocol state - HP1: Send_Packet
4263	 */
4264
4265	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4266	    (ushort_t *)nvp->nvp_data,
4267	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4268
4269	/*
4270	 * See you in nv_intr_pkt_pio.
4271	 * ATAPI protocol state - HP3: INTRQ_wait
4272	 */
4273
4274	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4275	    "nv_start_pkt_pio: exiting into HP3"));
4276
4277	return (SATA_TRAN_ACCEPTED);
4278}
4279
4280
4281/*
4282 * Interrupt processing for a non-data ATA command.
4283 */
4284static void
4285nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4286{
4287	uchar_t status;
4288	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4289	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4290	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4291	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4292
4293	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4294
4295	status = nv_get8(cmdhdl, nvp->nvp_status);
4296
4297	/*
4298	 * check for errors
4299	 */
4300	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4301		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4302		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4303		    nvp->nvp_altstatus);
4304		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4305	} else {
4306		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4307	}
4308
4309	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4310}
4311
4312
4313/*
4314 * ATA command, PIO data in
4315 */
4316static void
4317nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4318{
4319	uchar_t	status;
4320	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4321	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4322	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4323	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4324	int count;
4325
4326	status = nv_get8(cmdhdl, nvp->nvp_status);
4327
4328	if (status & SATA_STATUS_BSY) {
4329		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4330		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4331		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4332		    nvp->nvp_altstatus);
4333		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4334		nv_reset(nvp);
4335
4336		return;
4337	}
4338
4339	/*
4340	 * check for errors
4341	 */
4342	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4343	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4344		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4345		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4346		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4347
4348		return;
4349	}
4350
4351	/*
4352	 * read the next chunk of data (if any)
4353	 */
4354	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4355
4356	/*
4357	 * read count bytes
4358	 */
4359	ASSERT(count != 0);
4360
4361	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4362	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4363
4364	nv_slotp->nvslot_v_addr += count;
4365	nv_slotp->nvslot_byte_count -= count;
4366
4367
4368	if (nv_slotp->nvslot_byte_count != 0) {
4369		/*
4370		 * more to transfer.  Wait for next interrupt.
4371		 */
4372		return;
4373	}
4374
4375	/*
4376	 * transfer is complete. wait for the busy bit to settle.
4377	 */
4378	NV_DELAY_NSEC(400);
4379
4380	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4381	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4382}
4383
4384
4385/*
4386 * ATA command PIO data out
4387 */
4388static void
4389nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4390{
4391	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4392	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4393	uchar_t status;
4394	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4395	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4396	int count;
4397
4398	/*
4399	 * clear the IRQ
4400	 */
4401	status = nv_get8(cmdhdl, nvp->nvp_status);
4402
4403	if (status & SATA_STATUS_BSY) {
4404		/*
4405		 * this should not happen
4406		 */
4407		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4408		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4409		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4410		    nvp->nvp_altstatus);
4411		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4412
4413		return;
4414	}
4415
4416	/*
4417	 * check for errors
4418	 */
4419	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4420		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4421		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4422		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4423
4424		return;
4425	}
4426
4427	/*
4428	 * This is the condition which signals that the drive is
4429	 * no longer ready to transfer.  It is likely that the transfer
4430	 * completed successfully, but check that byte_count is
4431	 * zero to be sure.
4432	 */
4433	if ((status & SATA_STATUS_DRQ) == 0) {
4434
4435		if (nv_slotp->nvslot_byte_count == 0) {
4436			/*
4437			 * complete; successful transfer
4438			 */
4439			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4440		} else {
4441			/*
4442			 * error condition, incomplete transfer
4443			 */
4444			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4445			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4446		}
4447		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4448
4449		return;
4450	}
4451
4452	/*
4453	 * write the next chunk of data
4454	 */
4455	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4456
4457	/*
4458	 * write count bytes
4459	 */
4460
4461	ASSERT(count != 0);
4462
4463	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4464	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4465
4466	nv_slotp->nvslot_v_addr += count;
4467	nv_slotp->nvslot_byte_count -= count;
4468}
4469
4470
4471/*
4472 * ATAPI PACKET command, PIO in/out interrupt
4473 *
4474 * Under normal circumstances, one of four different interrupt scenarios
4475 * will result in this function being called:
4476 *
4477 * 1. Packet command data transfer
4478 * 2. Packet command completion
4479 * 3. Request sense data transfer
4480 * 4. Request sense command completion
4481 */
4482static void
4483nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4484{
4485	uchar_t	status;
4486	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4487	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4488	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4489	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4490	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4491	uint16_t ctlr_count;
4492	int count;
4493
4494	/* ATAPI protocol state - HP2: Check_Status_B */
4495
4496	status = nv_get8(cmdhdl, nvp->nvp_status);
4497	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4498	    "nv_intr_pkt_pio: status 0x%x", status));
4499
4500	if (status & SATA_STATUS_BSY) {
4501		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4502			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4503			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4504		} else {
4505			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4506			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4507
4508			nv_reset(nvp);
4509		}
4510
4511		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4512		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4513
4514		return;
4515	}
4516
4517	if ((status & SATA_STATUS_DF) != 0) {
4518		/*
4519		 * On device fault, just clean up and bail.  Request sense
4520		 * will just default to its NO SENSE initialized value.
4521		 */
4522
4523		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4524			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4525		}
4526
4527		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4528		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4529
4530		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4531		    nvp->nvp_altstatus);
4532		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4533		    nvp->nvp_error);
4534
4535		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4536		    "nv_intr_pkt_pio: device fault"));
4537
4538		return;
4539	}
4540
4541	if ((status & SATA_STATUS_ERR) != 0) {
4542		/*
4543		 * On command error, figure out whether we are processing a
4544		 * request sense.  If so, clean up and bail.  Otherwise,
4545		 * do a REQUEST SENSE.
4546		 */
4547
4548		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4549			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4550			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4551			    NV_FAILURE) {
4552				nv_copy_registers(nvp, &spkt->satapkt_device,
4553				    spkt);
4554				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4555				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4556			}
4557
4558			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4559			    nvp->nvp_altstatus);
4560			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4561			    nvp->nvp_error);
4562		} else {
4563			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4564			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4565
4566			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4567		}
4568
4569		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4570		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4571
4572		return;
4573	}
4574
4575	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4576		/*
4577		 * REQUEST SENSE command processing
4578		 */
4579
4580		if ((status & (SATA_STATUS_DRQ)) != 0) {
4581			/* ATAPI state - HP4: Transfer_Data */
4582
4583			/* read the byte count from the controller */
4584			ctlr_count =
4585			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4586			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4587
4588			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4589			    "nv_intr_pkt_pio: ctlr byte count - %d",
4590			    ctlr_count));
4591
4592			if (ctlr_count == 0) {
4593				/* no data to transfer - some devices do this */
4594
4595				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4596				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4597
4598				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4599				    "nv_intr_pkt_pio: done (no data)"));
4600
4601				return;
4602			}
4603
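			/* never transfer more than the sense buffer holds */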
4604			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4605
4606			/* transfer the data */
4607			ddi_rep_get16(cmdhdl,
4608			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4609			    (ushort_t *)nvp->nvp_data, (count >> 1),
4610			    DDI_DEV_NO_AUTOINCR);
4611
4612			/* consume residual bytes */
4613			ctlr_count -= count;
4614
4615			if (ctlr_count > 0) {
4616				for (; ctlr_count > 0; ctlr_count -= 2)
4617					(void) ddi_get16(cmdhdl,
4618					    (ushort_t *)nvp->nvp_data);
4619			}
4620
4621			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4622			    "nv_intr_pkt_pio: transition to HP2"));
4623		} else {
4624			/* still in ATAPI state - HP2 */
4625
4626			/*
4627			 * In order to avoid clobbering the rqsense data
4628			 * set by the SATA framework, the sense data read
4629			 * from the device is put in a separate buffer and
4630			 * copied into the packet after the request sense
4631			 * command successfully completes.
4632			 */
4633			bcopy(nv_slotp->nvslot_rqsense_buff,
4634			    spkt->satapkt_cmd.satacmd_rqsense,
4635			    SATA_ATAPI_RQSENSE_LEN);
4636
4637			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4638			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4639
4640			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4641			    "nv_intr_pkt_pio: request sense done"));
4642		}
4643
4644		return;
4645	}
4646
4647	/*
4648	 * Normal command processing
4649	 */
4650
4651	if ((status & (SATA_STATUS_DRQ)) != 0) {
4652		/* ATAPI protocol state - HP4: Transfer_Data */
4653
4654		/* read the byte count from the controller */
4655		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4656		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4657
4658		if (ctlr_count == 0) {
4659			/* no data to transfer - some devices do this */
4660
4661			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4662			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4663
4664			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4665			    "nv_intr_pkt_pio: done (no data)"));
4666
4667			return;
4668		}
4669
4670		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4671
4672		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4673		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4674
4675		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4676		    "nv_intr_pkt_pio: byte_count 0x%x",
4677		    nv_slotp->nvslot_byte_count));
4678
4679		/* transfer the data */
4680
4681		if (direction == SATA_DIR_READ) {
4682			ddi_rep_get16(cmdhdl,
4683			    (ushort_t *)nv_slotp->nvslot_v_addr,
4684			    (ushort_t *)nvp->nvp_data, (count >> 1),
4685			    DDI_DEV_NO_AUTOINCR);
4686
4687			ctlr_count -= count;
4688
4689			if (ctlr_count > 0) {
4690				/* consume residual bytes */
4691
4692				for (; ctlr_count > 0;
4693				    ctlr_count -= 2)
4694					(void) ddi_get16(cmdhdl,
4695					    (ushort_t *)nvp->nvp_data);
4696
4697				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4698				    "nv_intr_pkt_pio: bytes remained"));
4699			}
4700		} else {
4701			ddi_rep_put16(cmdhdl,
4702			    (ushort_t *)nv_slotp->nvslot_v_addr,
4703			    (ushort_t *)nvp->nvp_data, (count >> 1),
4704			    DDI_DEV_NO_AUTOINCR);
4705		}
4706
4707		nv_slotp->nvslot_v_addr += count;
4708		nv_slotp->nvslot_byte_count -= count;
4709
4710		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4711		    "nv_intr_pkt_pio: transition to HP2"));
4712	} else {
4713		/* still in ATAPI state - HP2 */
4714
4715		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4716		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4717
4718		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4719		    "nv_intr_pkt_pio: done"));
4720	}
4721}
4722
4723
4724/*
4725 * ATA command, DMA data in/out
4726 */
4727static void
4728nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4729{
4730	uchar_t status;
4731	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4732	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4733	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4734	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4735	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4736	uchar_t	bmicx;
4737	uchar_t bm_status;
4738
4739	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4740
4741	/*
4742	 * stop DMA engine.
4743	 */
4744	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4745	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4746
4747	/*
4748	 * get the status and clear the IRQ, and check for DMA error
4749	 */
4750	status = nv_get8(cmdhdl, nvp->nvp_status);
4751
4752	/*
4753	 * check for drive errors
4754	 */
4755	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4756		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4757		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4758		(void) nv_bm_status_clear(nvp);
4759
4760		return;
4761	}
4762
4763	bm_status = nv_bm_status_clear(nvp);
4764
4765	/*
4766	 * check for bus master errors
4767	 */
4768	if (bm_status & BMISX_IDERR) {
4769		spkt->satapkt_reason = SATA_PKT_RESET;
4770		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4771		    nvp->nvp_altstatus);
4772		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4773		nv_reset(nvp);
4774
4775		return;
4776	}
4777
4778	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4779}
4780
4781
4782/*
4783 * Wait for a register of a controller to achieve a specific state.
4784 * To return normally, all the bits in the first sub-mask must be ON,
4785 * all the bits in the second sub-mask must be OFF.
4786 * If timeout_usec microseconds pass without the controller achieving
4787 * the desired bit configuration, return B_FALSE; otherwise return B_TRUE.
4788 *
4789 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4790 * occur for the first 250 us, then switch over to a sleeping wait.
4791 *
4792 */
4793int
4794nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4795    int type_wait)
4796{
4797	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4798	hrtime_t end, cur, start_sleep, start;
4799	int first_time = B_TRUE;
4800	ushort_t val;
4801
4802	for (;;) {
4803		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4804
4805		if ((val & onbits) == onbits && (val & offbits) == 0) {
4806
4807			return (B_TRUE);
4808		}
4809
4810		cur = gethrtime();
4811
4812		/*
4813		 * store the start time and calculate the end
4814		 * time.  also calculate "start_sleep" which is
4815		 * the point after which the driver will stop busy
4816		 * waiting and change to sleep waiting.
4817		 */
4818		if (first_time) {
4819			first_time = B_FALSE;
4820			/*
4821			 * start and end are in nanoseconds
4822			 */
4823			start = cur;
4824			end = start + timeout_usec * 1000;
4825			/*
4826			 * add 250 us to start
4827			 */
4828			start_sleep =  start + 250000;
4829
4830			if (servicing_interrupt()) {
4831				type_wait = NV_NOSLEEP;
4832			}
4833		}
4834
4835		if (cur > end) {
4836
4837			break;
4838		}
4839
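		/*
		 * Past the busy-wait threshold: sleep for a tick if allowed,
		 * otherwise continue busy waiting in nv_usec_delay increments.
		 */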
4840		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4841#if ! defined(__lock_lint)
4842			delay(1);
4843#endif
4844		} else {
4845			drv_usecwait(nv_usec_delay);
4846		}
4847	}
4848
4849	return (B_FALSE);
4850}
4851
4852
4853/*
4854 * This is a slightly more complicated version that checks
4855 * for error conditions and bails out rather than looping
4856 * until the timeout is exceeded.
4857 *
4858 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4859 * occur for the first 250 us, then switch over to a sleeping wait.
4860 */
4861int
4862nv_wait3(
4863	nv_port_t	*nvp,
4864	uchar_t		onbits1,
4865	uchar_t		offbits1,
4866	uchar_t		failure_onbits2,
4867	uchar_t		failure_offbits2,
4868	uchar_t		failure_onbits3,
4869	uchar_t		failure_offbits3,
4870	uint_t		timeout_usec,
4871	int		type_wait)
4872{
4873	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4874	hrtime_t end, cur, start_sleep, start;
4875	int first_time = B_TRUE;
4876	ushort_t val;
4877
4878	for (;;) {
4879		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4880
4881		/*
4882		 * check for expected condition
4883		 */
4884		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4885
4886			return (B_TRUE);
4887		}
4888
4889		/*
4890		 * check for error conditions
4891		 */
4892		if ((val & failure_onbits2) == failure_onbits2 &&
4893		    (val & failure_offbits2) == 0) {
4894
4895			return (B_FALSE);
4896		}
4897
4898		if ((val & failure_onbits3) == failure_onbits3 &&
4899		    (val & failure_offbits3) == 0) {
4900
4901			return (B_FALSE);
4902		}
4903
4904		/*
4905		 * store the start time and calculate the end
4906		 * time.  also calculate "start_sleep" which is
4907		 * the point after which the driver will stop busy
4908		 * waiting and change to sleep waiting.
4909		 */
4910		if (first_time) {
4911			first_time = B_FALSE;
4912			/*
4913			 * start and end are in nanoseconds
4914			 */
4915			cur = start = gethrtime();
4916			end = start + timeout_usec * 1000;
4917			/*
4918			 * add 250 us to start
4919			 */
4920			start_sleep =  start + 250000;
4921
4922			if (servicing_interrupt()) {
4923				type_wait = NV_NOSLEEP;
4924			}
4925		} else {
4926			cur = gethrtime();
4927		}
4928
4929		if (cur > end) {
4930
4931			break;
4932		}
4933
4934		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4935#if ! defined(__lock_lint)
4936			delay(1);
4937#endif
4938		} else {
4939			drv_usecwait(nv_usec_delay);
4940		}
4941	}
4942
4943	return (B_FALSE);
4944}
4945
4946
4947/*
4948 * nv_check_link() checks whether the specified link is active and a
4949 * device is present and communicating.
4950 */
4951static boolean_t
4952nv_check_link(uint32_t sstatus)
4953{
4954	uint8_t det;
4955
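	/*
	 * Extract the DET field from SStatus; DEVPRE_PHYCOM indicates a
	 * device is present and PHY communication is established.
	 */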
4956	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4957
4958	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4959}
4960
4961
4962/*
4963 * nv_port_state_change() reports the state of the port to the
4964 * sata module by calling sata_hba_event_notify().  This
4965 * function is called any time the state of the port is changed
4966 */
4967static void
4968nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4969{
4970	sata_device_t sd;
4971
4972	bzero((void *)&sd, sizeof (sata_device_t));
4973	sd.satadev_rev = SATA_DEVICE_REV;
4974	nv_copy_registers(nvp, &sd, NULL);
4975
4976	/*
4977	 * When NCQ is implemented, the sactive and snotific fields will
4978	 * need to be updated.
4979	 */
4980	sd.satadev_addr.cport = nvp->nvp_port_num;
4981	sd.satadev_addr.qual = addr_type;
4982	sd.satadev_state = state;
4983
4984	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4985}
4986
4987
4988/*
4989 * timeout processing:
4990 *
4991 * Check if any packets have crossed a timeout threshold.  If so, then
4992 * abort the packet.  This function is not NCQ aware.
4993 *
4994 * If reset was invoked in any other place than nv_sata_probe(), then
4995 * monitor for reset completion here.
4996 *
4997 */
4998static void
4999nv_timeout(void *arg)
5000{
5001	nv_port_t *nvp = arg;
5002	nv_slot_t *nv_slotp;
5003	int restart_timeout = B_FALSE;
5004
5005	mutex_enter(&nvp->nvp_mutex);
5006
5007	/*
5008	 * If the probe entry point is driving the reset and signature
5009	 * acquisition, just return.
5010	 */
5011	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5012		goto finished;
5013	}
5014
5015	/*
5016	 * If the port is not in the init state, it likely
5017	 * means the link was lost while a timeout was active.
5018	 */
5019	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5020		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5021		    "nv_timeout: port uninitialized"));
5022
5023		goto finished;
5024	}
5025
5026	if (nvp->nvp_state & NV_PORT_RESET) {
5027		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5028		uint32_t sstatus;
5029
5030		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5031		    "nv_timeout(): port waiting for signature"));
5032
5033		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5034
5035		/*
5036		 * check for link presence.  If the link remains
5037		 * missing for more than 2 seconds, send a remove
5038		 * event and abort signature acquisition.
5039		 */
5040		if (nv_check_link(sstatus) == B_FALSE) {
5041			clock_t e_link_lost = ddi_get_lbolt();
5042
5043			if (nvp->nvp_link_lost_time == 0) {
5044				nvp->nvp_link_lost_time = e_link_lost;
5045			}
5046			if (TICK_TO_SEC(e_link_lost -
5047			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5048				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5049				    "probe: intermittent link lost while"
5050				    " resetting"));
5051				restart_timeout = B_TRUE;
5052			} else {
5053				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5054				    "link lost during signature acquisition."
5055				    "  Giving up"));
5056				nv_port_state_change(nvp,
5057				    SATA_EVNT_DEVICE_DETACHED|
5058				    SATA_EVNT_LINK_LOST,
5059				    SATA_ADDR_CPORT, 0);
5060				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5061				nvp->nvp_state &= ~NV_PORT_RESET;
5062			}
5063
5064			goto finished;
5065		} else {
5066
5067			nvp->nvp_link_lost_time = 0;
5068		}
5069
5070		nv_read_signature(nvp);
5071
5072		if (nvp->nvp_signature != 0) {
5073			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5074			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5075				nvp->nvp_state |= NV_PORT_RESTORE;
5076				nv_port_state_change(nvp,
5077				    SATA_EVNT_DEVICE_RESET,
5078				    SATA_ADDR_DCPORT,
5079				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5080			}
5081
5082			goto finished;
5083		}
5084
5085		/*
5086		 * Reset if more than 5 seconds has passed without
5087		 * acquiring a signature.
5088		 */
5089		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5090			nv_reset(nvp);
5091		}
5092
5093		restart_timeout = B_TRUE;
5094		goto finished;
5095	}
5096
5097
5098	/*
5099	 * not yet NCQ aware
5100	 */
5101	nv_slotp = &(nvp->nvp_slot[0]);
5102
5103	/*
5104	 * this happens early on before nv_slotp is set
5105	 * up OR when a device was unexpectedly removed and
5106	 * there was an active packet.
5107	 */
5108	if (nv_slotp == NULL) {
5109		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5110		    "nv_timeout: nv_slotp == NULL"));
5111
5112		goto finished;
5113	}
5114
5115	/*
5116	 * perform timeout checking and processing only if there is an
5117	 * active packet on the port
5118	 */
5119	if (nv_slotp->nvslot_spkt != NULL)  {
5120		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5121		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5122		uint8_t cmd = satacmd->satacmd_cmd_reg;
5123		uint64_t lba;
5124
5125#if ! defined(__lock_lint) && defined(DEBUG)
5126
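		/*
		 * Assemble the 48-bit LBA from the taskfile register shadow
		 * bytes for the debug message below.
		 */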
5127		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5128		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5129		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5130		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5131		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5132		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5133#endif
5134
5135		/*
5136		 * timeout not needed if there is a polling thread
5137		 */
5138		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5139
5140			goto finished;
5141		}
5142
5143		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5144		    spkt->satapkt_time) {
5145			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5146			    "abort timeout: "
5147			    "nvslot_stime: %ld max ticks till timeout: "
5148			    "%ld cur_time: %ld cmd=%x lba=%d",
5149			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5150			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5151
5152			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5153
5154		} else {
5155			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5156			    " still in use so restarting timeout"));
5157		}
5158		restart_timeout = B_TRUE;
5159
5160	} else {
5161		/*
5162		 * there was no active packet, so do not re-enable timeout
5163		 */
5164		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5165		    "nv_timeout: no active packet so not re-arming timeout"));
5166	}
5167
5168	finished:
5169
5170	if (restart_timeout == B_TRUE) {
5171		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5172		    drv_usectohz(NV_ONE_SEC));
5173	} else {
5174		nvp->nvp_timeout_id = 0;
5175	}
5176	mutex_exit(&nvp->nvp_mutex);
5177}
5178
5179
5180/*
5181 * enable or disable the 3 interrupt types the driver is
5182 * interested in: completion, add and remove.
5183 */
5184static void
5185mcp04_set_intr(nv_port_t *nvp, int flag)
5186{
5187	nv_ctl_t *nvc = nvp->nvp_ctlp;
5188	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5189	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5190	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
5191	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
5192	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
5193	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5194
5195	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5196		int_en = nv_get8(bar5_hdl,
5197		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5198		int_en &= ~intr_bits[port];
5199		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5200		    int_en);
5201		return;
5202	}
5203
5204	ASSERT(mutex_owned(&nvp->nvp_mutex));
5205
5206	/*
5207	 * The controller-level lock is also required since access to the
5208	 * 8-bit interrupt register is shared between both channels.
5209	 */
5210	mutex_enter(&nvc->nvc_mutex);
5211
5212	if (flag & NV_INTR_CLEAR_ALL) {
5213		NVLOG((NVDBG_INTR, nvc, nvp,
5214		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
5215
5216		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5217		    (uint8_t *)(nvc->nvc_mcp04_int_status));
5218
5219		if (intr_status & clear_all_bits[port]) {
5220
5221			nv_put8(nvc->nvc_bar_hdl[5],
5222			    (uint8_t *)(nvc->nvc_mcp04_int_status),
5223			    clear_all_bits[port]);
5224
5225			NVLOG((NVDBG_INTR, nvc, nvp,
5226			    "interrupt bits cleared %x",
5227			    intr_status & clear_all_bits[port]));
5228		}
5229	}
5230
5231	if (flag & NV_INTR_DISABLE) {
5232		NVLOG((NVDBG_INTR, nvc, nvp,
5233		    "mcp04_set_intr: NV_INTR_DISABLE"));
5234		int_en = nv_get8(bar5_hdl,
5235		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5236		int_en &= ~intr_bits[port];
5237		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5238		    int_en);
5239	}
5240
5241	if (flag & NV_INTR_ENABLE) {
5242		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
5243		int_en = nv_get8(bar5_hdl,
5244		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
5245		int_en |= intr_bits[port];
5246		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
5247		    int_en);
5248	}
5249
5250	mutex_exit(&nvc->nvc_mutex);
5251}
5252
5253
5254/*
5255 * enable or disable the 3 interrupts the driver is interested in:
5256 * completion interrupt, hot add, and hot remove interrupt.
5257 */
5258static void
5259mcp55_set_intr(nv_port_t *nvp, int flag)
5260{
5261	nv_ctl_t *nvc = nvp->nvp_ctlp;
5262	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5263	uint16_t intr_bits =
5264	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
5265	uint16_t int_en;
5266
5267	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5268		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5269		int_en &= ~intr_bits;
5270		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5271		return;
5272	}
5273
5274	ASSERT(mutex_owned(&nvp->nvp_mutex));
5275
5276	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5277
5278	if (flag & NV_INTR_CLEAR_ALL) {
5279		NVLOG((NVDBG_INTR, nvc, nvp,
5280		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
5281		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
5282	}
5283
5284	if (flag & NV_INTR_ENABLE) {
5285		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
5286		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5287		int_en |= intr_bits;
5288		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5289	}
5290
5291	if (flag & NV_INTR_DISABLE) {
5292		NVLOG((NVDBG_INTR, nvc, nvp,
5293		    "mcp55_set_intr: NV_INTR_DISABLE"));
5294		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
5295		int_en &= ~intr_bits;
5296		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
5297	}
5298}
5299
5300
5301/*
5302 * The PM functions for suspend and resume are incomplete and need additional
5303 * work.  It may or may not work in the current state.
5304 */
5305static void
5306nv_resume(nv_port_t *nvp)
5307{
5308	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5309
5310	mutex_enter(&nvp->nvp_mutex);
5311
5312	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5313		mutex_exit(&nvp->nvp_mutex);
5314
5315		return;
5316	}
5317
5318#ifdef SGPIO_SUPPORT
5319	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5320	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5321#endif
5322
5323	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5324
5325	/*
5326	 * power may have been removed from the port and the
5327	 * drive, and/or a drive may have been added or removed.
5328	 * Force a reset which will cause a probe and re-establish
5329	 * any state needed on the drive.
5330	 * nv_reset(nvp);
5331	 */
5332
5333	nv_reset(nvp);
5334
5335	mutex_exit(&nvp->nvp_mutex);
5336}
5337
5338/*
5339 * The PM functions for suspend and resume are incomplete and need additional
5340 * work.  It may or may not work in the current state.
5341 */
5342static void
5343nv_suspend(nv_port_t *nvp)
5344{
5345	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5346
5347	mutex_enter(&nvp->nvp_mutex);
5348
5349#ifdef SGPIO_SUPPORT
5350	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5351	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5352#endif
5353
5354	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5355		mutex_exit(&nvp->nvp_mutex);
5356
5357		return;
5358	}
5359
5360	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
5361
5362	/*
5363	 * power may have been removed from the port and the
5364	 * drive, and/or a drive may have been added or removed.
5365	 * Force a reset which will cause a probe and re-establish
5366	 * any state needed on the drive.
5367	 * nv_reset(nvp);
5368	 */
5369
5370	mutex_exit(&nvp->nvp_mutex);
5371}
5372
5373
5374static void
5375nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5376{
5377	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5378	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5379	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5380	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5381	uchar_t status;
5382	struct sata_cmd_flags flags;
5383
5384	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5385
5386	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5387	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5388	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5389
5390	if (spkt == NULL) {
5391
5392		return;
5393	}
5394
5395	/*
5396	 * in the error case, implicitly request that the registers needed
5397	 * for error handling be copied out.
5398	 */
5399	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5400	    nvp->nvp_altstatus);
5401
5402	flags = scmd->satacmd_flags;
5403
5404	if (status & SATA_STATUS_ERR) {
5405		flags.sata_copy_out_lba_low_msb = B_TRUE;
5406		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5407		flags.sata_copy_out_lba_high_msb = B_TRUE;
5408		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5409		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5410		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5411		flags.sata_copy_out_error_reg = B_TRUE;
5412		flags.sata_copy_out_sec_count_msb = B_TRUE;
5413		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5414		scmd->satacmd_status_reg = status;
5415	}
5416
5417	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5418
5419		/*
5420		 * set HOB so that high byte will be read
5421		 */
5422		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5423
5424		/*
5425		 * get the requested high bytes
5426		 */
5427		if (flags.sata_copy_out_sec_count_msb) {
5428			scmd->satacmd_sec_count_msb =
5429			    nv_get8(cmdhdl, nvp->nvp_count);
5430		}
5431
5432		if (flags.sata_copy_out_lba_low_msb) {
5433			scmd->satacmd_lba_low_msb =
5434			    nv_get8(cmdhdl, nvp->nvp_sect);
5435		}
5436
5437		if (flags.sata_copy_out_lba_mid_msb) {
5438			scmd->satacmd_lba_mid_msb =
5439			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5440		}
5441
5442		if (flags.sata_copy_out_lba_high_msb) {
5443			scmd->satacmd_lba_high_msb =
5444			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5445		}
5446	}
5447
5448	/*
5449	 * disable HOB so that low byte is read
5450	 */
5451	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5452
5453	/*
5454	 * get the requested low bytes
5455	 */
5456	if (flags.sata_copy_out_sec_count_lsb) {
5457		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5458	}
5459
5460	if (flags.sata_copy_out_lba_low_lsb) {
5461		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5462	}
5463
5464	if (flags.sata_copy_out_lba_mid_lsb) {
5465		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5466	}
5467
5468	if (flags.sata_copy_out_lba_high_lsb) {
5469		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5470	}
5471
5472	/*
5473	 * get the device register if requested
5474	 */
5475	if (flags.sata_copy_out_device_reg) {
5476		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5477	}
5478
5479	/*
5480	 * get the error register if requested
5481	 */
5482	if (flags.sata_copy_out_error_reg) {
5483		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5484	}
5485}
5486
5487
5488/*
5489 * Hot plug and remove interrupts can occur when the device is reset.  Just
5490 * masking the interrupt doesn't always work well because if a
5491 * different interrupt arrives on the other port, the driver can still
5492 * end up checking the state of the other port and discover the hot
5493 * interrupt flag is set even though it was masked.  Checking for recent
5494 * reset activity and ignoring the interrupt if so is the easiest approach.
5495 */
5496static void
5497nv_report_add_remove(nv_port_t *nvp, int flags)
5498{
5499	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5500	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5501	uint32_t sstatus;
5502	int i;
5503
5504	/*
5505	 * If a reset occurred within the last second, ignore the interrupt.
5506	 * This should be reworked and improved instead of having this
5507	 * somewhat heavy handed clamping job.
5508	 */
5509	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
5510		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
5511		    "ignoring plug interrupt was %dms ago",
5512		    TICK_TO_MSEC(time_diff)));
5513
5514		return;
5515	}
5516
5517	/*
5518	 * wait up to 1ms for sstatus to settle and reflect the true
5519	 * status of the port.  Failure to do so can create confusion
5520	 * in probe, where the incorrect sstatus value can still
5521	 * persist.
5522	 */
5523	for (i = 0; i < 1000; i++) {
5524		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5525
5526		if ((flags == NV_PORT_HOTREMOVED) &&
5527		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5528		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5529			break;
5530		}
5531
5532		if ((flags != NV_PORT_HOTREMOVED) &&
5533		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5534		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5535			break;
5536		}
5537		drv_usecwait(1);
5538	}
5539
5540	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5541	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5542
5543	if (flags == NV_PORT_HOTREMOVED) {
5544		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5545		    "nv_report_add_remove() hot removed"));
5546		nv_port_state_change(nvp,
5547		    SATA_EVNT_DEVICE_DETACHED,
5548		    SATA_ADDR_CPORT, 0);
5549
5550		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5551	} else {
5552		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5553		    "nv_report_add_remove() hot plugged"));
5554		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5555		    SATA_ADDR_CPORT, 0);
5556	}
5557}
5558
5559/*
5560 * Get request sense data and stuff it in the command's sense buffer.
5561 * Start a request sense command in order to get sense data to insert
5562 * in the sata packet's rqsense buffer.  The command completion
5563 * processing is in nv_intr_pkt_pio.
5564 *
5565 * The sata framework provides a function to allocate and set up a
5566 * request sense packet command.  The reasons it is not being used here are:
5567 * a) it cannot be called in an interrupt context and this function is
5568 *    called in an interrupt context.
5569 * b) it allocates DMA resources that are not used here because this is
5570 *    implemented using PIO.
5571 *
5572 * If, in the future, this is changed to use DMA, the sata framework should
5573 * be used to allocate and set-up the error retrieval (request sense)
5574 * command.
5575 */
5576static int
5577nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5578{
5579	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5580	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5581	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5582	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5583
5584	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5585	    "nv_start_rqsense_pio: start"));
5586
5587	/* clear the local request sense buffer before starting the command */
5588	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5589
5590	/* Write the request sense PACKET command */
5591
5592	/* select the drive */
5593	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5594
5595	/* make certain the drive selected */
5596	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5597	    NV_SEC2USEC(5), 0) == B_FALSE) {
5598		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5599		    "nv_start_rqsense_pio: drive select failed"));
5600		return (NV_FAILURE);
5601	}
5602
5603	/* set up the command */
5604	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
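	/* the cylinder registers carry the ATAPI byte count limit */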
5605	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5606	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5607	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5608	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5609
5610	/* initiate the command by writing the command register last */
5611	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5612
5613	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5614	NV_DELAY_NSEC(400);
5615
5616	/*
5617	 * Wait for the device to indicate that it is ready for the command
5618	 * ATAPI protocol state - HP0: Check_Status_A
5619	 */
5620
5621	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5622	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5623	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5624	    4000000, 0) == B_FALSE) {
5625		if (nv_get8(cmdhdl, nvp->nvp_status) &
5626		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5627			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5628			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5629		} else {
5630			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5631			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5632		}
5633
5634		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5635		nv_complete_io(nvp, spkt, 0);
5636		nv_reset(nvp);
5637
5638		return (NV_FAILURE);
5639	}
5640
5641	/*
5642	 * Put the ATAPI command in the data register
5643	 * ATAPI protocol state - HP1: Send_Packet
5644	 */
5645
5646	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5647	    (ushort_t *)nvp->nvp_data,
5648	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5649
5650	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5651	    "nv_start_rqsense_pio: exiting into HP3"));
5652
5653	return (NV_SUCCESS);
5654}
5655
5656/*
5657 * quiesce(9E) entry point.
5658 *
5659 * This function is called when the system is single-threaded at high
5660 * PIL with preemption disabled. Therefore, this function must not be
5661 * blocked.
5662 *
5663 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5664 * DDI_FAILURE indicates an error condition and should almost never happen.
5665 */
5666static int
5667nv_quiesce(dev_info_t *dip)
5668{
5669	int port, instance = ddi_get_instance(dip);
5670	nv_ctl_t *nvc;
5671
5672	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
5673		return (DDI_FAILURE);
5674
5675	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
5676		nv_port_t *nvp = &(nvc->nvc_port[port]);
5677		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5678		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5679		uint32_t sctrl;
5680
5681		/*
5682		 * Stop the controllers from generating interrupts.
5683		 */
5684		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
5685
5686		/*
5687		 * clear signature registers
5688		 */
5689		nv_put8(cmdhdl, nvp->nvp_sect, 0);
5690		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
5691		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
5692		nv_put8(cmdhdl, nvp->nvp_count, 0);
5693
5694		nvp->nvp_signature = 0;
5695		nvp->nvp_type = 0;
5696		nvp->nvp_state |= NV_PORT_RESET;
5697		nvp->nvp_reset_time = ddi_get_lbolt();
5698		nvp->nvp_link_lost_time = 0;
5699
5700		/*
5701		 * assert reset in PHY by writing a 1 to bit 0 scontrol
5702		 */
5703		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5704
5705		nv_put32(bar5_hdl, nvp->nvp_sctrl,
5706		    sctrl | SCONTROL_DET_COMRESET);
5707
5708		/*
5709		 * wait 1ms
5710		 */
5711		drv_usecwait(1000);
5712
5713		/*
5714		 * de-assert reset in PHY
5715		 */
5716		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
5717	}
5718
5719	return (DDI_SUCCESS);
5720}
5721
5722
5723#ifdef SGPIO_SUPPORT
5724/*
5725 * NVIDIA specific SGPIO LED support
5726 * Please refer to the NVIDIA documentation for additional details
5727 */
5728
5729/*
5730 * nv_sgp_led_init
5731 * Detect SGPIO support.  If present, initialize.
5732 */
5733static void
5734nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5735{
5736	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5737	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5738	nv_sgp_cmn_t *cmn;	/* shared data structure */
5739	char tqname[SGPIO_TQ_NAME_LEN];
5740	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5741
5742	/*
5743	 * Initialize with appropriately invalid values in case this function
5744	 * exits without initializing SGPIO (for example, there is no SGPIO
5745	 * support).
5746	 */
5747	nvc->nvc_sgp_csr = 0;
5748	nvc->nvc_sgp_cbp = NULL;
5749
5750	/*
5751	 * Only try to initialize SGPIO LED support if this property
5752	 * indicates it should be.
5753	 */
5754	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
5755	    "enable-sgpio-leds", 0) != 1)
5756		return;
5757
5758	/*
5759	 * CK804 can pass the sgpio_detect test even though it does not support
5760	 * SGPIO, so don't even look at a CK804.
5761	 */
5762	if (nvc->nvc_mcp55_flag != B_TRUE)
5763		return;
5764
5765	/*
5766	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5767	 * However, the current implementation only supports 4 drives.
5768	 * With two drives per controller, that means only the first two
5769	 * controllers need to be examined.
5770	 */
5771	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5772		return;
5773
5774	/* confirm that the SGPIO registers are there */
5775	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5776		NVLOG((NVDBG_INIT, nvc, NULL,
5777		    "SGPIO registers not detected"));
5778		return;
5779	}
5780
5781	/* save off the SGPIO_CSR I/O address */
5782	nvc->nvc_sgp_csr = csrp;
5783
5784	/* map in Command Block */
5785	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5786	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5787
5788	/* initialize the SGPIO h/w */
5789	if (nv_sgp_init(nvc) == NV_FAILURE) {
5790		nv_cmn_err(CE_WARN, nvc, NULL,
5791		    "!Unable to initialize SGPIO");
5792	}
5793
5794	if (nvc->nvc_ctlr_num == 0) {
5795		/*
5796		 * Controller 0 on the MCP55/IO55 initializes the SGPIO
5797		 * and the data that is shared between the controllers.
5798		 * The clever thing to do would be to let the first controller
5799		 * that comes up be the one that initializes all this.
5800		 * However, SGPIO state is not necessarily zeroed
5801		 * between OS reboots, so there might be old data there.
5802		 */
5803
5804		/* allocate shared space */
5805		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5806		    KM_SLEEP);
5807		if (cmn == NULL) {
5808			nv_cmn_err(CE_WARN, nvc, NULL,
5809			    "!Failed to allocate shared data");
5810			return;
5811		}
5812
5813		nvc->nvc_sgp_cmn = cmn;
5814
5815		/* initialize the shared data structure */
5816		cmn->nvs_magic = SGPIO_MAGIC;
5817		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5818		cmn->nvs_connected = 0;
5819		cmn->nvs_activity = 0;
5820
5821		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5822		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5823		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5824
5825		/* put the address in the SGPIO scratch register */
5826#if defined(__amd64)
5827		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5828#else
5829		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5830#endif
5831
5832		/* start the activity LED taskq */
5833
5834		/*
5835		 * The taskq name should be unique and the time
5836		 */
5837		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5838		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5839		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5840		    TASKQ_DEFAULTPRI, 0);
5841		if (cmn->nvs_taskq == NULL) {
5842			cmn->nvs_taskq_delay = 0;
5843			nv_cmn_err(CE_WARN, nvc, NULL,
5844			    "!Failed to start activity LED taskq");
5845		} else {
5846			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5847			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5848			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5849		}
5850
5851	} else if (nvc->nvc_ctlr_num == 1) {
5852		/*
5853		 * Controller 1 confirms that SGPIO has been initialized
5854		 * and, if so, tries to get the shared data pointer; otherwise
5855		 * the shared data pointer is obtained when the data is accessed.
5856		 */
5857
5858		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5859			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5860
5861			/*
5862			 * It looks like a pointer, but is it the shared data?
5863			 */
5864			if (cmn->nvs_magic == SGPIO_MAGIC) {
5865				nvc->nvc_sgp_cmn = cmn;
5866
5867				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5868			}
5869		}
5870	}
5871}
5872
5873/*
5874 * nv_sgp_detect
5875 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5876 * report back whether both were readable.
5877 */
5878static int
5879nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5880    uint32_t *cbpp)
5881{
5882	/* get the SGPIO_CSRP */
5883	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5884	if (*csrpp == 0) {
5885		return (NV_FAILURE);
5886	}
5887
5888	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5889	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5890	if (*cbpp == 0) {
5891		return (NV_FAILURE);
5892	}
5893
5894	/* SGPIO_CBP is good, so we must support SGPIO */
5895	return (NV_SUCCESS);
5896}
5897
5898/*
5899 * nv_sgp_init
5900 * Initialize SGPIO.  The process is specified by NVIDIA.
5901 */
5902static int
5903nv_sgp_init(nv_ctl_t *nvc)
5904{
5905	uint32_t status;
5906	int drive_count;
5907
5908	/*
	 * If the SGPIO status is SGPIO_STATE_RESET, the logic has been
	 * reset and needs to be initialized.
5911	 */
5912	status = nv_sgp_csr_read(nvc);
5913	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5914		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5915			/* reset and try again */
5916			nv_sgp_reset(nvc);
5917			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5918				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5919				    "SGPIO init failed"));
5920				return (NV_FAILURE);
5921			}
5922		}
5923	}
5924
5925	/*
5926	 * NVIDIA recommends reading the supported drive count even
5927	 * though they also indicate that it is 4 at this time.
5928	 */
5929	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5930	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5931		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5932		    "SGPIO reported undocumented drive count - %d",
5933		    drive_count));
5934	}
5935
5936	NVLOG((NVDBG_INIT, nvc, NULL,
5937	    "initialized ctlr: %d csr: 0x%08x",
5938	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5939
5940	return (NV_SUCCESS);
5941}
5942
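/*
 * nv_sgp_reset
 * Issue the SGPIO RESET command and check the CSR to confirm that it
 * completed successfully.
 */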
5943static void
5944nv_sgp_reset(nv_ctl_t *nvc)
5945{
5946	uint32_t cmd;
5947	uint32_t status;
5948
5949	cmd = SGPIO_CMD_RESET;
5950	nv_sgp_csr_write(nvc, cmd);
5951
5952	status = nv_sgp_csr_read(nvc);
5953
5954	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5955		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5956		    "SGPIO reset failed: CSR - 0x%x", status));
5957	}
5958}
5959
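/*
 * nv_sgp_init_cmd
 * Issue the SGPIO READ PARAMETERS command and poll the CSR until either
 * the command status indicates an error or the sequence field changes,
 * which signals that this command (rather than a stale one) has completed.
 */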
5960static int
5961nv_sgp_init_cmd(nv_ctl_t *nvc)
5962{
5963	int seq;
5964	hrtime_t start, end;
5965	uint32_t status;
5966	uint32_t cmd;
5967
5968	/* get the old sequence value */
5969	status = nv_sgp_csr_read(nvc);
5970	seq = SGPIO_CSR_SEQ(status);
5971
5972	/* check the state since we have the info anyway */
5973	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5974		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5975		    "SGPIO init_cmd: state not operational"));
5976	}
5977
5978	/* issue command */
5979	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5980	nv_sgp_csr_write(nvc, cmd);
5981
5982	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5983
5984	/* poll for completion */
5985	start = gethrtime();
5986	end = start + NV_SGP_CMD_TIMEOUT;
5987	for (;;) {
5988		status = nv_sgp_csr_read(nvc);
5989
5990		/* break on error */
5991		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5992			break;
5993
5994		/* break on command completion (seq changed) */
5995		if (SGPIO_CSR_SEQ(status) != seq) {
5996			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
5997				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5998				    "Seq changed but command still active"));
5999			}
6000
6001			break;
6002		}
6003
6004		/* Wait 400 ns and try again */
6005		NV_DELAY_NSEC(400);
6006
6007		if (gethrtime() > end)
6008			break;
6009	}
6010
6011	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6012		return (NV_SUCCESS);
6013
6014	return (NV_FAILURE);
6015}
6016
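/*
 * nv_sgp_check_set_cmn
 * Make sure this controller instance has a pointer to the shared data.
 * If the SGPIO scratch register holds a value that passes the magic
 * number check, record the pointer and mark this controller as a user of
 * the shared data.  Returns NV_SUCCESS if the pointer is available and
 * NV_FAILURE otherwise.
 */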
6017static int
6018nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6019{
6020	nv_sgp_cmn_t *cmn;
6021
6022	if (nvc->nvc_sgp_cbp == NULL)
6023		return (NV_FAILURE);
6024
6025	/* check to see if Scratch Register is set */
6026	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
6027		nvc->nvc_sgp_cmn =
6028		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
6029
6030		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
6031			return (NV_FAILURE);
6032
6033		cmn = nvc->nvc_sgp_cmn;
6034
6035		mutex_enter(&cmn->nvs_slock);
6036		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6037		mutex_exit(&cmn->nvs_slock);
6038
6039		return (NV_SUCCESS);
6040	}
6041
6042	return (NV_FAILURE);
6043}
6044
6045/*
6046 * nv_sgp_csr_read
 * This is just a 32-bit read from the I/O port whose address was obtained
 * from PCI config space.
 *
 * XXX It was advised to use the in[bwl] functions for this, even though
 * they are obsolete interfaces.
6052 */
6053static int
6054nv_sgp_csr_read(nv_ctl_t *nvc)
6055{
6056	return (inl(nvc->nvc_sgp_csr));
6057}
6058
6059/*
6060 * nv_sgp_csr_write
6061 * This is just a 32-bit I/O port write.  The port number was obtained from
6062 * the PCI config space.
6063 *
 * XXX It was advised to use the out[bwl] functions for this, even though
 * they are obsolete interfaces.
6066 */
6067static void
6068nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6069{
6070	outl(nvc->nvc_sgp_csr, val);
6071}
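
/*
 * A DDI-compliant alternative (a sketch only, not what the driver does
 * today) would be to map the register range once with
 * ddi_regs_map_setup() and go through the resulting access handle:
 *
 *	uint32_t *csr_addr;		(hypothetical mapped address)
 *	ddi_acc_handle_t csr_hdl;	(hypothetical access handle)
 *
 *	if (ddi_regs_map_setup(nvc->nvc_dip, rnum, (caddr_t *)&csr_addr,
 *	    0, 0, &dev_attr, &csr_hdl) == DDI_SUCCESS) {
 *		val = ddi_get32(csr_hdl, csr_addr);
 *		ddi_put32(csr_hdl, csr_addr, val);
 *	}
 *
 * rnum and dev_attr stand in for the register set number and the device
 * access attributes; neither is defined by this driver.
 */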
6072
6073/*
6074 * nv_sgp_write_data
6075 * Cause SGPIO to send Command Block data
6076 */
6077static int
6078nv_sgp_write_data(nv_ctl_t *nvc)
6079{
6080	hrtime_t start, end;
6081	uint32_t status;
6082	uint32_t cmd;
6083
6084	/* issue command */
6085	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6086	nv_sgp_csr_write(nvc, cmd);
6087
6088	/* poll for completion */
6089	start = gethrtime();
6090	end = start + NV_SGP_CMD_TIMEOUT;
6091	for (;;) {
6092		status = nv_sgp_csr_read(nvc);
6093
6094		/* break on error completion */
6095		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6096			break;
6097
6098		/* break on successful completion */
6099		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6100			break;
6101
6102		/* Wait 400 ns and try again */
6103		NV_DELAY_NSEC(400);
6104
6105		if (gethrtime() > end)
6106			break;
6107	}
6108
6109	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6110		return (NV_SUCCESS);
6111
6112	return (NV_FAILURE);
6113}
6114
6115/*
6116 * nv_sgp_activity_led_ctl
 * This is run from a taskq.  It wakes up at a fixed interval and checks
 * whether any of the activity LEDs need to be changed.
6119 */
6120static void
6121nv_sgp_activity_led_ctl(void *arg)
6122{
6123	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6124	nv_sgp_cmn_t *cmn;
6125	volatile nv_sgp_cb_t *cbp;
6126	clock_t ticks;
6127	uint8_t drv_leds;
6128	uint32_t old_leds;
6129	uint32_t new_led_state;
6130	int i;
6131
6132	cmn = nvc->nvc_sgp_cmn;
6133	cbp = nvc->nvc_sgp_cbp;
6134
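	/*
	 * Loop until nv_sgp_cleanup() sets nvs_taskq_delay to 0, which
	 * makes the computed tick count zero and ends this task.
	 */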
6135	do {
6136		/* save off the old state of all of the LEDs */
6137		old_leds = cbp->sgpio0_tr;
6138
6139		DTRACE_PROBE3(sgpio__activity__state,
6140		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6141		    int, old_leds);
6142
6143		new_led_state = 0;
6144
6145		/* for each drive */
6146		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6147
6148			/* get the current state of the LEDs for the drive */
6149			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6150
6151			if ((cmn->nvs_connected & (1 << i)) == 0) {
6152				/* if not connected, turn off activity */
6153				drv_leds &= ~TR_ACTIVE_MASK;
6154				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6155
6156				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6157				new_led_state |=
6158				    SGPIO0_TR_DRV_SET(drv_leds, i);
6159
6160				continue;
6161			}
6162
6163			if ((cmn->nvs_activity & (1 << i)) == 0) {
6164				/* connected, but not active */
6165				drv_leds &= ~TR_ACTIVE_MASK;
6166				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6167
6168				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6169				new_led_state |=
6170				    SGPIO0_TR_DRV_SET(drv_leds, i);
6171
6172				continue;
6173			}
6174
6175			/* connected and active */
6176			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6177				/* was enabled, so disable */
6178				drv_leds &= ~TR_ACTIVE_MASK;
6179				drv_leds |=
6180				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6181
6182				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6183				new_led_state |=
6184				    SGPIO0_TR_DRV_SET(drv_leds, i);
6185			} else {
6186				/* was disabled, so enable */
6187				drv_leds &= ~TR_ACTIVE_MASK;
6188				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6189
6190				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6191				new_led_state |=
6192				    SGPIO0_TR_DRV_SET(drv_leds, i);
6193			}
6194
6195			/*
			 * Clear the activity bit.  If there is drive
			 * activity again within the loop interval
			 * (currently 1/16 second), nvs_activity will be
			 * set again and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as the interval does not drop below
			 * 1/30 second.
6204			 */
6205			mutex_enter(&cmn->nvs_slock);
6206			cmn->nvs_activity &= ~(1 << i);
6207			mutex_exit(&cmn->nvs_slock);
6208		}
6209
6210		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6211
6212		/* write out LED values */
6213
6214		mutex_enter(&cmn->nvs_slock);
6215		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6216		cbp->sgpio0_tr |= new_led_state;
6217		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6218		mutex_exit(&cmn->nvs_slock);
6219
6220		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6221			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6222			    "nv_sgp_write_data failure updating active LED"));
6223		}
6224
6225		/* now rest for the interval */
6226		mutex_enter(&cmn->nvs_tlock);
6227		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6228		if (ticks > 0)
6229			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6230			    ddi_get_lbolt() + ticks);
6231		mutex_exit(&cmn->nvs_tlock);
6232	} while (ticks > 0);
6233}
6234
6235/*
6236 * nv_sgp_drive_connect
6237 * Set the flag used to indicate that the drive is attached to the HBA.
6238 * Used to let the taskq know that it should turn the Activity LED on.
6239 */
6240static void
6241nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6242{
6243	nv_sgp_cmn_t *cmn;
6244
6245	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6246		return;
6247	cmn = nvc->nvc_sgp_cmn;
6248
6249	mutex_enter(&cmn->nvs_slock);
6250	cmn->nvs_connected |= (1 << drive);
6251	mutex_exit(&cmn->nvs_slock);
6252}
6253
6254/*
6255 * nv_sgp_drive_disconnect
 * Clears the flag used to indicate that the drive is attached to the HBA,
 * marking the drive as no longer present.  Used to let the taskq know
 * that it should turn the Activity LED off.  The flag that indicates that
 * the drive is active is also cleared.
6260 */
6261static void
6262nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6263{
6264	nv_sgp_cmn_t *cmn;
6265
6266	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6267		return;
6268	cmn = nvc->nvc_sgp_cmn;
6269
6270	mutex_enter(&cmn->nvs_slock);
6271	cmn->nvs_connected &= ~(1 << drive);
6272	cmn->nvs_activity &= ~(1 << drive);
6273	mutex_exit(&cmn->nvs_slock);
6274}
6275
6276/*
6277 * nv_sgp_drive_active
6278 * Sets the flag used to indicate that the drive has been accessed and the
6279 * LED should be flicked off, then on.  It is cleared at a fixed time
6280 * interval by the LED taskq and set by the sata command start.
6281 */
6282static void
6283nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6284{
6285	nv_sgp_cmn_t *cmn;
6286
6287	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6288		return;
6289	cmn = nvc->nvc_sgp_cmn;
6290
6291	DTRACE_PROBE1(sgpio__active, int, drive);
6292
6293	mutex_enter(&cmn->nvs_slock);
6294	cmn->nvs_connected |= (1 << drive);
6295	cmn->nvs_activity |= (1 << drive);
6296	mutex_exit(&cmn->nvs_slock);
6297}
6298
6299
6300/*
6301 * nv_sgp_locate
6302 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6303 * maintained in the SGPIO Command Block.
6304 */
6305static void
6306nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6307{
6308	uint8_t leds;
6309	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6310	nv_sgp_cmn_t *cmn;
6311
6312	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6313		return;
6314	cmn = nvc->nvc_sgp_cmn;
6315
6316	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6317		return;
6318
6319	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6320
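	/*
	 * Read-modify-write the per-drive LED field: extract this drive's
	 * current LED bits, replace only the Locate bits, merge the updated
	 * field back into the transmit register, and re-assert the enable
	 * bits in CR0 before the command block is sent to the hardware.
	 */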
6321	mutex_enter(&cmn->nvs_slock);
6322
6323	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6324
6325	leds &= ~TR_LOCATE_MASK;
6326	leds |= TR_LOCATE_SET(value);
6327
6328	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6329	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6330
6331	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6332
6333	mutex_exit(&cmn->nvs_slock);
6334
6335	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6336		nv_cmn_err(CE_WARN, nvc, NULL,
6337		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6338	}
6339}
6340
6341/*
6342 * nv_sgp_error
6343 * Turns the Error/Failure LED off or on for a particular drive.  State is
6344 * maintained in the SGPIO Command Block.
6345 */
6346static void
6347nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6348{
6349	uint8_t leds;
6350	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6351	nv_sgp_cmn_t *cmn;
6352
6353	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6354		return;
6355	cmn = nvc->nvc_sgp_cmn;
6356
6357	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6358		return;
6359
6360	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6361
6362	mutex_enter(&cmn->nvs_slock);
6363
6364	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6365
6366	leds &= ~TR_ERROR_MASK;
6367	leds |= TR_ERROR_SET(value);
6368
6369	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6370	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6371
6372	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6373
6374	mutex_exit(&cmn->nvs_slock);
6375
6376	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6377		nv_cmn_err(CE_WARN, nvc, NULL,
6378		    "!nv_sgp_write_data failure updating Fail/Error LED");
6379	}
6380}
6381
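/*
 * nv_sgp_cleanup
 * Turn off the activity LEDs for this controller's ports and drop its
 * "in use" bit.  When the last controller instance detaches, also stop
 * the activity taskq, turn off all of the LEDs, and free the shared
 * data.  This instance's mapping of the command block is released in
 * either case.
 */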
6382static void
6383nv_sgp_cleanup(nv_ctl_t *nvc)
6384{
6385	int drive;
6386	uint8_t drv_leds;
6387	uint32_t led_state;
6388	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6389	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6390	extern void psm_unmap_phys(caddr_t, size_t);
6391
6392	/*
6393	 * If the SGPIO command block isn't mapped or the shared data
6394	 * structure isn't present in this instance, there isn't much that
6395	 * can be cleaned up.
6396	 */
6397	if ((cb == NULL) || (cmn == NULL))
6398		return;
6399
6400	/* turn off activity LEDs for this controller */
6401	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6402
6403	/* get the existing LED state */
6404	led_state = cb->sgpio0_tr;
6405
6406	/* turn off port 0 */
6407	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6408	led_state &= SGPIO0_TR_DRV_CLR(drive);
6409	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6410
6411	/* turn off port 1 */
6412	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6413	led_state &= SGPIO0_TR_DRV_CLR(drive);
6414	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6415
	/* set the new led state, which should turn off this ctrl's LEDs */
	cb->sgpio0_tr = led_state;
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6418	(void) nv_sgp_write_data(nvc);
6419
6420	/* clear the controller's in use bit */
6421	mutex_enter(&cmn->nvs_slock);
6422	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6423	mutex_exit(&cmn->nvs_slock);
6424
6425	if (cmn->nvs_in_use == 0) {
6426		/* if all "in use" bits cleared, take everything down */
6427
6428		if (cmn->nvs_taskq != NULL) {
6429			/* allow activity taskq to exit */
6430			cmn->nvs_taskq_delay = 0;
6431			cv_broadcast(&cmn->nvs_cv);
6432
6433			/* then destroy it */
6434			ddi_taskq_destroy(cmn->nvs_taskq);
6435		}
6436
6437		/* turn off all of the LEDs */
6438		cb->sgpio0_tr = 0;
6439		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6440		(void) nv_sgp_write_data(nvc);
6441
		cb->sgpio_sr = 0;
6443
6444		/* free resources */
6445		cv_destroy(&cmn->nvs_cv);
6446		mutex_destroy(&cmn->nvs_tlock);
6447		mutex_destroy(&cmn->nvs_slock);
6448
6449		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6450	}
6451
6452	nvc->nvc_sgp_cmn = NULL;
6453
6454	/* unmap the SGPIO Command Block */
6455	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6456}
6457#endif	/* SGPIO_SUPPORT */
6458