nv_sata.c revision 9709:10a5d653b2cf
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30 * based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
36 * disabled and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device.  mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible.  Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
48 * be supported, but changes to these states are apparently ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner rather
51 * than later, that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required.  However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, power
56 * can be removed from the device, and the driver cannot rely on the BIOS
57 * to have reset any state.  For the time being, there are no known
58 * non-default configurations that need to be programmed.  This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team.  If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66#include <sys/scsi/scsi.h>
67#include <sys/pci.h>
68#include <sys/byteorder.h>
69#include <sys/sunddi.h>
70#include <sys/sata/sata_hba.h>
71#ifdef SGPIO_SUPPORT
72#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73#include <sys/devctl.h>
74#include <sys/sdt.h>
75#endif
76#include <sys/sata/adapters/nv_sata/nv_sata.h>
77#include <sys/disp.h>
78#include <sys/note.h>
79#include <sys/promif.h>
80
81
82/*
83 * Function prototypes for driver entry points
84 */
85static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87static int nv_quiesce(dev_info_t *dip);
88static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89    void *arg, void **result);
90
91/*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102/*
103 * Local function prototypes
104 */
105static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108#ifdef NV_MSI_SUPPORTED
109static int nv_add_msi_intrs(nv_ctl_t *nvc);
110#endif
111static void nv_rem_intrs(nv_ctl_t *nvc);
112static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113static int nv_start_nodata(nv_port_t *nvp, int slot);
114static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115static int nv_start_pio_in(nv_port_t *nvp, int slot);
116static int nv_start_pio_out(nv_port_t *nvp, int slot);
117static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121static int nv_start_dma(nv_port_t *nvp, int slot);
122static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124static void nv_uninit_ctl(nv_ctl_t *nvc);
125static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127static void nv_uninit_port(nv_port_t *nvp);
128static int nv_init_port(nv_port_t *nvp);
129static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131#ifdef NCQ
132static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133#endif
134static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136    int state);
137static boolean_t nv_check_link(uint32_t sstatus);
138static void nv_common_reg_init(nv_ctl_t *nvc);
139static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
140static void nv_reset(nv_port_t *nvp);
141static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
142static void nv_timeout(void *);
143static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
144static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
145static void nv_read_signature(nv_port_t *nvp);
146static void mcp5x_set_intr(nv_port_t *nvp, int flag);
147static void ck804_set_intr(nv_port_t *nvp, int flag);
148static void nv_resume(nv_port_t *nvp);
149static void nv_suspend(nv_port_t *nvp);
150static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
151static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
152static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153    sata_pkt_t *spkt);
154static void nv_report_add_remove(nv_port_t *nvp, int flags);
155static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157    uchar_t failure_onbits2, uchar_t failure_offbits2,
158    uchar_t failure_onbits3, uchar_t failure_offbits3,
159    uint_t timeout_usec, int type_wait);
160static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161    uint_t timeout_usec, int type_wait);
162static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163
164#ifdef SGPIO_SUPPORT
165static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
166static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
167static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
168    cred_t *credp, int *rvalp);
169
170static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
171static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
172    uint32_t *cbpp);
173static int nv_sgp_init(nv_ctl_t *nvc);
174static void nv_sgp_reset(nv_ctl_t *nvc);
175static int nv_sgp_init_cmd(nv_ctl_t *nvc);
176static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
177static int nv_sgp_csr_read(nv_ctl_t *nvc);
178static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
179static int nv_sgp_write_data(nv_ctl_t *nvc);
180static void nv_sgp_activity_led_ctl(void *arg);
181static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
182static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
183static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
184static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
185static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
186static void nv_sgp_cleanup(nv_ctl_t *nvc);
187#endif
188
189
190/*
191 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
192 * Verify whether it is needed if the driver is ported to another ISA.
193 */
194static ddi_dma_attr_t buffer_dma_attr = {
195	DMA_ATTR_V0,		/* dma_attr_version */
196	0,			/* dma_attr_addr_lo: lowest bus address */
197	0xffffffffull,		/* dma_attr_addr_hi: */
198	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
199	4,			/* dma_attr_align */
200	1,			/* dma_attr_burstsizes. */
201	1,			/* dma_attr_minxfer */
202	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
203	0xffffffffull,		/* dma_attr_seg */
204	NV_DMA_NSEGS,		/* dma_attr_sgllen */
205	512,			/* dma_attr_granular */
206	0,			/* dma_attr_flags */
207};
208static ddi_dma_attr_t buffer_dma_40bit_attr = {
209	DMA_ATTR_V0,		/* dma_attr_version */
210	0,			/* dma_attr_addr_lo: lowest bus address */
211	0xffffffffffull,	/* dma_attr_addr_hi: */
212	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
213	4,			/* dma_attr_align */
214	1,			/* dma_attr_burstsizes. */
215	1,			/* dma_attr_minxfer */
216	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_maxxfer including all cookies */
217	0xffffffffull,		/* dma_attr_seg */
218	NV_DMA_NSEGS,		/* dma_attr_sgllen */
219	512,			/* dma_attr_granular */
220	0,			/* dma_attr_flags */
221};
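/*
 * Illustrative sketch only (not driver code): a DMA handle is typically
 * allocated against one of the attribute structures above using
 * ddi_dma_alloc_handle(9F).  The dip and hdl variables here are
 * hypothetical.
 *
 *	ddi_dma_handle_t hdl;
 *
 *	if (ddi_dma_alloc_handle(dip, &buffer_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &hdl) != DDI_SUCCESS) {
 *		return (NV_FAILURE);
 *	}
 */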
222
223
224/*
225 * DMA attributes for PRD tables
226 */
227ddi_dma_attr_t nv_prd_dma_attr = {
228	DMA_ATTR_V0,		/* dma_attr_version */
229	0,			/* dma_attr_addr_lo */
230	0xffffffffull,		/* dma_attr_addr_hi */
231	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
232	4,			/* dma_attr_align */
233	1,			/* dma_attr_burstsizes */
234	1,			/* dma_attr_minxfer */
235	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
236	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
237	1,			/* dma_attr_sgllen */
238	1,			/* dma_attr_granular */
239	0			/* dma_attr_flags */
240};
241
242/*
243 * Device access attributes
244 */
245static ddi_device_acc_attr_t accattr = {
246    DDI_DEVICE_ATTR_V0,
247    DDI_STRUCTURE_LE_ACC,
248    DDI_STRICTORDER_ACC
249};
250
251
252#ifdef SGPIO_SUPPORT
253static struct cb_ops nv_cb_ops = {
254	nv_open,		/* open */
255	nv_close,		/* close */
256	nodev,			/* strategy (block) */
257	nodev,			/* print (block) */
258	nodev,			/* dump (block) */
259	nodev,			/* read */
260	nodev,			/* write */
261	nv_ioctl,		/* ioctl */
262	nodev,			/* devmap */
263	nodev,			/* mmap */
264	nodev,			/* segmap */
265	nochpoll,		/* chpoll */
266	ddi_prop_op,		/* prop_op */
267	NULL,			/* streams */
268	D_NEW | D_MP |
269	D_64BIT | D_HOTPLUG,	/* flags */
270	CB_REV			/* rev */
271};
272#endif  /* SGPIO_SUPPORT */
273
274
275static struct dev_ops nv_dev_ops = {
276	DEVO_REV,		/* devo_rev */
277	0,			/* refcnt  */
278	nv_getinfo,		/* info */
279	nulldev,		/* identify */
280	nulldev,		/* probe */
281	nv_attach,		/* attach */
282	nv_detach,		/* detach */
283	nodev,			/* no reset */
284#ifdef SGPIO_SUPPORT
285	&nv_cb_ops,		/* driver operations */
286#else
287	(struct cb_ops *)0,	/* driver operations */
288#endif
289	NULL,			/* bus operations */
290	NULL,			/* power */
291	nv_quiesce		/* quiesce */
292};
293
294
295/*
296 * Request Sense CDB for ATAPI
297 */
298static const uint8_t nv_rqsense_cdb[16] = {
299	SCMD_REQUEST_SENSE,
300	0,
301	0,
302	0,
303	SATA_ATAPI_MIN_RQSENSE_LEN,
304	0,
305	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
306};
307
308
309static sata_tran_hotplug_ops_t nv_hotplug_ops;
310
311extern struct mod_ops mod_driverops;
312
313static  struct modldrv modldrv = {
314	&mod_driverops,	/* driverops */
315	"Nvidia ck804/mcp51/mcp55 HBA",
316	&nv_dev_ops,	/* driver ops */
317};
318
319static  struct modlinkage modlinkage = {
320	MODREV_1,
321	&modldrv,
322	NULL
323};
324
325
326/*
327 * wait between checks of reg status
328 */
329int nv_usec_delay = NV_WAIT_REG_CHECK;
330
331/*
332 * The following is needed for nv_vcmn_err()
333 */
334static kmutex_t nv_log_mutex; /* protects nv_log_buf */
335static char nv_log_buf[NV_STRING_512];
336int nv_debug_flags = NVDBG_ALWAYS;
337int nv_log_to_console = B_FALSE;
338
339int nv_log_delay = 0;
340int nv_prom_print = B_FALSE;
341
342/*
343 * for debugging
344 */
345#ifdef DEBUG
346int ncq_commands = 0;
347int non_ncq_commands = 0;
348#endif
349
350/*
351 * Opaque state pointer to be initialized by ddi_soft_state_init()
352 */
353static void *nv_statep	= NULL;
354
355/* This can be disabled if there are any problems with 40-bit DMA */
356int nv_sata_40bit_dma = B_TRUE;
357
358static sata_tran_hotplug_ops_t nv_hotplug_ops = {
359	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
360	nv_sata_activate,	/* activate port. cfgadm -c connect */
361	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
362};
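/*
 * For reference, the hotplug entry points above back the cfgadm(1M) SATA
 * plugin.  A typical invocation looks like the following, where sata0/1
 * is a hypothetical attachment point for a port on this controller:
 *
 *	# cfgadm -c connect sata0/1
 *	# cfgadm -c disconnect sata0/1
 */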
363
364
365/*
366 *  nv module initialization
367 */
368int
369_init(void)
370{
371	int	error;
372
373	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
374
375	if (error != 0) {
376
377		return (error);
378	}
379
380	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
381
382	if ((error = sata_hba_init(&modlinkage)) != 0) {
383		ddi_soft_state_fini(&nv_statep);
384		mutex_destroy(&nv_log_mutex);
385
386		return (error);
387	}
388
389	error = mod_install(&modlinkage);
390	if (error != 0) {
391		sata_hba_fini(&modlinkage);
392		ddi_soft_state_fini(&nv_statep);
393		mutex_destroy(&nv_log_mutex);
394
395		return (error);
396	}
397
398	return (error);
399}
400
401
402/*
403 * nv module uninitialize
404 */
405int
406_fini(void)
407{
408	int	error;
409
410	error = mod_remove(&modlinkage);
411
412	if (error != 0) {
413		return (error);
414	}
415
416	/*
417	 * remove the resources allocated in _init()
418	 */
419	mutex_destroy(&nv_log_mutex);
420	sata_hba_fini(&modlinkage);
421	ddi_soft_state_fini(&nv_statep);
422
423	return (error);
424}
425
426
427/*
428 * nv _info entry point
429 */
430int
431_info(struct modinfo *modinfop)
432{
433	return (mod_info(&modlinkage, modinfop));
434}
435
436
437/*
438 * These wrappers for ddi_{get,put}{8,16,32} are for observability
439 * with DTrace.
440 */
441#ifdef DEBUG
442
443static void
444nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
445{
446	ddi_put8(handle, dev_addr, value);
447}
448
449static void
450nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
451{
452	ddi_put32(handle, dev_addr, value);
453}
454
455static uint32_t
456nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
457{
458	return (ddi_get32(handle, dev_addr));
459}
460
461static void
462nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
463{
464	ddi_put16(handle, dev_addr, value);
465}
466
467static uint16_t
468nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
469{
470	return (ddi_get16(handle, dev_addr));
471}
472
473static uint8_t
474nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
475{
476	return (ddi_get8(handle, dev_addr));
477}
478
479#else
480
481#define	nv_put8 ddi_put8
482#define	nv_put32 ddi_put32
483#define	nv_get32 ddi_get32
484#define	nv_put16 ddi_put16
485#define	nv_get16 ddi_get16
486#define	nv_get8 ddi_get8
487
488#endif
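/*
 * Illustrative example only: when built with DEBUG the wrappers above are
 * real functions, so register accesses can be observed with fbt probes,
 * for instance (exact probe availability depends on the build):
 *
 *	# dtrace -n 'fbt::nv_put8:entry
 *	    { printf("addr %p value %x", arg1, arg2); }'
 */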
489
490
491/*
492 * Driver attach
493 */
494static int
495nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
496{
497	int status, attach_state, intr_types, bar, i, command;
498	int inst = ddi_get_instance(dip);
499	ddi_acc_handle_t pci_conf_handle;
500	nv_ctl_t *nvc;
501	uint8_t subclass;
502	uint32_t reg32;
503#ifdef SGPIO_SUPPORT
504	pci_regspec_t *regs;
505	int rlen;
506#endif
507
508	switch (cmd) {
509
510	case DDI_ATTACH:
511
512		NVLOG((NVDBG_INIT, NULL, NULL,
513		    "nv_attach(): DDI_ATTACH inst %d", inst));
514
515		attach_state = ATTACH_PROGRESS_NONE;
516
517		status = ddi_soft_state_zalloc(nv_statep, inst);
518
519		if (status != DDI_SUCCESS) {
520			break;
521		}
522
523		nvc = ddi_get_soft_state(nv_statep, inst);
524
525		nvc->nvc_dip = dip;
526
527		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
528
529		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
530			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
531			    PCI_CONF_REVID);
532			NVLOG((NVDBG_INIT, NULL, NULL,
533			    "inst %d: silicon revid is %x nv_debug_flags=%x",
534			    inst, nvc->nvc_revid, nv_debug_flags));
535		} else {
536			break;
537		}
538
539		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
540
541		/*
542		 * Set the PCI command register: enable IO/MEM/Master.
543		 */
544		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
545		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
546		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
547
548		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
549
550		if (subclass & PCI_MASS_RAID) {
551			cmn_err(CE_WARN,
552			    "attach failed: RAID mode not supported");
553			break;
554		}
555
556		/*
557		 * the 6 bars of the controller are:
558		 * 0: port 0 task file
559		 * 1: port 0 status
560		 * 2: port 1 task file
561		 * 3: port 1 status
562		 * 4: bus master for both ports
563		 * 5: extended registers for SATA features
564		 */
565		for (bar = 0; bar < 6; bar++) {
566			status = ddi_regs_map_setup(dip, bar + 1,
567			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
568			    &nvc->nvc_bar_hdl[bar]);
569
570			if (status != DDI_SUCCESS) {
571				NVLOG((NVDBG_INIT, nvc, NULL,
572				    "ddi_regs_map_setup failure for bar"
573				    " %d status = %d", bar, status));
574				break;
575			}
576		}
577
578		attach_state |= ATTACH_PROGRESS_BARS;
579
580		/*
581		 * initialize controller and driver core
582		 */
583		status = nv_init_ctl(nvc, pci_conf_handle);
584
585		if (status == NV_FAILURE) {
586			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
587
588			break;
589		}
590
591		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
592
593		/*
594		 * initialize mutexes
595		 */
596		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
597		    DDI_INTR_PRI(nvc->nvc_intr_pri));
598
599		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
600
601		/*
602		 * get supported interrupt types
603		 */
604		if (ddi_intr_get_supported_types(dip, &intr_types) !=
605		    DDI_SUCCESS) {
606			nv_cmn_err(CE_WARN, nvc, NULL,
607			    "!ddi_intr_get_supported_types failed");
608			NVLOG((NVDBG_INIT, nvc, NULL,
609			    "interrupt supported types failed"));
610
611			break;
612		}
613
614		NVLOG((NVDBG_INIT, nvc, NULL,
615		    "ddi_intr_get_supported_types() returned: 0x%x",
616		    intr_types));
617
618#ifdef NV_MSI_SUPPORTED
619		if (intr_types & DDI_INTR_TYPE_MSI) {
620			NVLOG((NVDBG_INIT, nvc, NULL,
621			    "using MSI interrupt type"));
622
623			/*
624			 * Try MSI first, but fall back to legacy if MSI
625			 * attach fails
626			 */
627			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
628				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
629				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
630				NVLOG((NVDBG_INIT, nvc, NULL,
631				    "MSI interrupt setup done"));
632			} else {
633				nv_cmn_err(CE_CONT, nvc, NULL,
634				    "!MSI registration failed "
635				    "will try Legacy interrupts");
636			}
637		}
638#endif
639
640		/*
641		 * Either the MSI interrupt setup has failed or only
642		 * the fixed interrupts are available on the system.
643		 */
644		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
645		    (intr_types & DDI_INTR_TYPE_FIXED)) {
646
647			NVLOG((NVDBG_INIT, nvc, NULL,
648			    "using Legacy interrupt type"));
649
650			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
651				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
652				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
653				NVLOG((NVDBG_INIT, nvc, NULL,
654				    "Legacy interrupt setup done"));
655			} else {
656				nv_cmn_err(CE_WARN, nvc, NULL,
657				    "!legacy interrupt setup failed");
658				NVLOG((NVDBG_INIT, nvc, NULL,
659				    "legacy interrupt setup failed"));
660				break;
661			}
662		}
663
664		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
665			NVLOG((NVDBG_INIT, nvc, NULL,
666			    "no interrupts registered"));
667			break;
668		}
669
670#ifdef SGPIO_SUPPORT
671		/*
672		 * save off the controller number
673		 */
674		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
675		    "reg", (caddr_t)&regs, &rlen);
676		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
677		kmem_free(regs, rlen);
678
679		/*
680		 * initialize SGPIO
681		 */
682		nv_sgp_led_init(nvc, pci_conf_handle);
683#endif	/* SGPIO_SUPPORT */
684
685		/*
686		 * attach to sata module
687		 */
688		if (sata_hba_attach(nvc->nvc_dip,
689		    &nvc->nvc_sata_hba_tran,
690		    DDI_ATTACH) != DDI_SUCCESS) {
691			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
692
693			break;
694		}
695
696		pci_config_teardown(&pci_conf_handle);
697
698		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
699
700		return (DDI_SUCCESS);
701
702	case DDI_RESUME:
703
704		nvc = ddi_get_soft_state(nv_statep, inst);
705
706		NVLOG((NVDBG_INIT, nvc, NULL,
707		    "nv_attach(): DDI_RESUME inst %d", inst));
708
709		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
710			return (DDI_FAILURE);
711		}
712
713		/*
714		 * Set the PCI command register: enable IO/MEM/Master.
715		 */
716		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
717		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
718		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
719
720		/*
721		 * Need to set bit 2 to 1 at config offset 0x50
722		 * to enable access to the bar5 registers.
723		 */
724		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
725
726		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
727			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
728			    reg32 | NV_BAR5_SPACE_EN);
729		}
730
731		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
732
733		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
734			nv_resume(&(nvc->nvc_port[i]));
735		}
736
737		pci_config_teardown(&pci_conf_handle);
738
739		return (DDI_SUCCESS);
740
741	default:
742		return (DDI_FAILURE);
743	}
744
745
746	/*
747	 * DDI_ATTACH failure path starts here
748	 */
749
750	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
751		nv_rem_intrs(nvc);
752	}
753
754	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
755		/*
756		 * Remove timers
757		 */
758		int port = 0;
759		nv_port_t *nvp;
760
761		for (; port < NV_MAX_PORTS(nvc); port++) {
762			nvp = &(nvc->nvc_port[port]);
763			if (nvp->nvp_timeout_id != 0) {
764				(void) untimeout(nvp->nvp_timeout_id);
765			}
766		}
767	}
768
769	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
770		mutex_destroy(&nvc->nvc_mutex);
771	}
772
773	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
774		nv_uninit_ctl(nvc);
775	}
776
777	if (attach_state & ATTACH_PROGRESS_BARS) {
778		while (--bar >= 0) {
779			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
780		}
781	}
782
783	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
784		ddi_soft_state_free(nv_statep, inst);
785	}
786
787	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
788		pci_config_teardown(&pci_conf_handle);
789	}
790
791	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
792
793	return (DDI_FAILURE);
794}
795
796
797static int
798nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
799{
800	int i, port, inst = ddi_get_instance(dip);
801	nv_ctl_t *nvc;
802	nv_port_t *nvp;
803
804	nvc = ddi_get_soft_state(nv_statep, inst);
805
806	switch (cmd) {
807
808	case DDI_DETACH:
809
810		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
811
812		/*
813		 * Remove interrupts
814		 */
815		nv_rem_intrs(nvc);
816
817		/*
818		 * Remove timers
819		 */
820		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
821			nvp = &(nvc->nvc_port[port]);
822			if (nvp->nvp_timeout_id != 0) {
823				(void) untimeout(nvp->nvp_timeout_id);
824			}
825		}
826
827		/*
828		 * Remove maps
829		 */
830		for (i = 0; i < 6; i++) {
831			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
832		}
833
834		/*
835		 * Destroy mutexes
836		 */
837		mutex_destroy(&nvc->nvc_mutex);
838
839		/*
840		 * Uninitialize the controller
841		 */
842		nv_uninit_ctl(nvc);
843
844#ifdef SGPIO_SUPPORT
845		/*
846		 * release SGPIO resources
847		 */
848		nv_sgp_cleanup(nvc);
849#endif
850
851		/*
852		 * unregister from the sata module
853		 */
854		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
855
856		/*
857		 * Free soft state
858		 */
859		ddi_soft_state_free(nv_statep, inst);
860
861		return (DDI_SUCCESS);
862
863	case DDI_SUSPEND:
864
865		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
866
867		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
868			nv_suspend(&(nvc->nvc_port[i]));
869		}
870
871		nvc->nvc_state |= NV_CTRL_SUSPEND;
872
873		return (DDI_SUCCESS);
874
875	default:
876		return (DDI_FAILURE);
877	}
878}
879
880
881/*ARGSUSED*/
882static int
883nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
884{
885	nv_ctl_t *nvc;
886	int instance;
887	dev_t dev;
888
889	dev = (dev_t)arg;
890	instance = getminor(dev);
891
892	switch (infocmd) {
893	case DDI_INFO_DEVT2DEVINFO:
894		nvc = ddi_get_soft_state(nv_statep,  instance);
895		if (nvc != NULL) {
896			*result = nvc->nvc_dip;
897			return (DDI_SUCCESS);
898		} else {
899			*result = NULL;
900			return (DDI_FAILURE);
901		}
902	case DDI_INFO_DEVT2INSTANCE:
903		*(int *)result = instance;
904		break;
905	default:
906		break;
907	}
908	return (DDI_SUCCESS);
909}
910
911
912#ifdef SGPIO_SUPPORT
913/* ARGSUSED */
914static int
915nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
916{
917	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
918
919	if (nvc == NULL) {
920		return (ENXIO);
921	}
922
923	return (0);
924}
925
926
927/* ARGSUSED */
928static int
929nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
930{
931	return (0);
932}
933
934
935/* ARGSUSED */
936static int
937nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
938{
939	nv_ctl_t *nvc;
940	int inst;
941	int status;
942	int ctlr, port;
943	int drive;
944	uint8_t curr_led;
945	struct dc_led_ctl led;
946
947	inst = getminor(dev);
948	if (inst == -1) {
949		return (EBADF);
950	}
951
952	nvc = ddi_get_soft_state(nv_statep, inst);
953	if (nvc == NULL) {
954		return (EBADF);
955	}
956
957	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
958		return (EIO);
959	}
960
961	switch (cmd) {
962	case DEVCTL_SET_LED:
963		status = ddi_copyin((void *)arg, &led,
964		    sizeof (struct dc_led_ctl), mode);
965		if (status != 0)
966			return (EFAULT);
967
968		/*
969		 * Since only the first two controllers currently support
970		 * SGPIO (as per NVIDIA docs), this code does as well.
971		 * Note that this also validates the port value encoded in
972		 * the LED number.
973		 */
974
975		ctlr = SGP_DRV_TO_CTLR(led.led_number);
976		if ((ctlr != 0) && (ctlr != 1))
977			return (ENXIO);
978
979		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
980		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
981			return (EINVAL);
982		}
983
984		drive = led.led_number;
985
986		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
987		    (led.led_state == DCL_STATE_OFF)) {
988
989			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
990				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
991			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
992				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
993			} else {
994				return (ENXIO);
995			}
996
997			port = SGP_DRV_TO_PORT(led.led_number);
998			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
999		}
1000
1001		if (led.led_ctl_active == DCL_CNTRL_ON) {
1002			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1003				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1004			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1005				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1006			} else {
1007				return (ENXIO);
1008			}
1009
1010			port = SGP_DRV_TO_PORT(led.led_number);
1011			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1012		}
1013
1014		break;
1015
1016	case DEVCTL_GET_LED:
1017		status = ddi_copyin((void *)arg, &led,
1018		    sizeof (struct dc_led_ctl), mode);
1019		if (status != 0)
1020			return (EFAULT);
1021
1022		/*
1023		 * Since only the first two controllers currently support
1024		 * SGPIO (as per NVIDIA docs), this code does as well.
1025		 * Note that this also validates the port value encoded in
1026		 * the LED number.
1027		 */
1028
1029		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1030		if ((ctlr != 0) && (ctlr != 1))
1031			return (ENXIO);
1032
1033		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1034		    led.led_number);
1035
1036		port = SGP_DRV_TO_PORT(led.led_number);
1037		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1038			led.led_ctl_active = DCL_CNTRL_ON;
1039
1040			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1041				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1042					led.led_state = DCL_STATE_OFF;
1043				else
1044					led.led_state = DCL_STATE_ON;
1045			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1046				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1047					led.led_state = DCL_STATE_OFF;
1048				else
1049					led.led_state = DCL_STATE_ON;
1050			} else {
1051				return (ENXIO);
1052			}
1053		} else {
1054			led.led_ctl_active = DCL_CNTRL_OFF;
1055			/*
1056			 * Not really off, but never set and no constant for
1057			 * tri-state
1058			 */
1059			led.led_state = DCL_STATE_OFF;
1060		}
1061
1062		status = ddi_copyout(&led, (void *)arg,
1063		    sizeof (struct dc_led_ctl), mode);
1064		if (status != 0)
1065			return (EFAULT);
1066
1067		break;
1068
1069	case DEVCTL_NUM_LEDS:
1070		led.led_number = SGPIO_DRV_CNT_VALUE;
1071		led.led_ctl_active = 1;
1072		led.led_type = 3;
1073
1074		/*
1075		 * According to documentation, NVIDIA SGPIO is supposed to
1076		 * support blinking, but it does not seem to work in practice.
1077		 */
1078		led.led_state = DCL_STATE_ON;
1079
1080		status = ddi_copyout(&led, (void *)arg,
1081		    sizeof (struct dc_led_ctl), mode);
1082		if (status != 0)
1083			return (EFAULT);
1084
1085		break;
1086
1087	default:
1088		return (EINVAL);
1089	}
1090
1091	return (0);
1092}
1093#endif	/* SGPIO_SUPPORT */
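/*
 * Illustrative userland sketch of driving the DEVCTL_SET_LED ioctl handled
 * above.  The fd variable is a hypothetical descriptor for this
 * controller's devctl minor node; error handling is omitted.
 *
 *	struct dc_led_ctl led;
 *
 *	led.led_number = 0;
 *	led.led_ctl_active = DCL_CNTRL_ON;
 *	led.led_type = DCL_TYPE_DEVICE_FAIL;
 *	led.led_state = DCL_STATE_ON;
 *	(void) ioctl(fd, DEVCTL_SET_LED, &led);
 */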
1094
1095
1096/*
1097 * Called by sata module to probe a port.  Port and device state
1098 * are not changed here... only reported back to the sata module.
1099 *
1100 * If probe confirms a device is present for the first time, it will
1101 * initiate a device reset, then probe will be called again and the
1102 * signature will be checked.  If the signature is valid, data structures
1103 * will be initialized.
1104 */
1105static int
1106nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1107{
1108	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1109	uint8_t cport = sd->satadev_addr.cport;
1110	uint8_t pmport = sd->satadev_addr.pmport;
1111	uint8_t qual = sd->satadev_addr.qual;
1112	clock_t nv_lbolt = ddi_get_lbolt();
1113	nv_port_t *nvp;
1114
1115	if (cport >= NV_MAX_PORTS(nvc)) {
1116		sd->satadev_type = SATA_DTYPE_NONE;
1117		sd->satadev_state = SATA_STATE_UNKNOWN;
1118
1119		return (SATA_FAILURE);
1120	}
1121
1122	ASSERT(nvc->nvc_port != NULL);
1123	nvp = &(nvc->nvc_port[cport]);
1124	ASSERT(nvp != NULL);
1125
1126	NVLOG((NVDBG_PROBE, nvc, nvp,
1127	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1128	    "qual: 0x%x", cport, pmport, qual));
1129
1130	mutex_enter(&nvp->nvp_mutex);
1131
1132	/*
1133	 * This check seems to be done in the SATA module.
1134	 * It may not be required here
1135	 */
1136	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1137		nv_cmn_err(CE_WARN, nvc, nvp,
1138		    "port inactive.  Use cfgadm to activate");
1139		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1140		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1141		mutex_exit(&nvp->nvp_mutex);
1142
1143		return (SATA_FAILURE);
1144	}
1145
1146	if (qual == SATA_ADDR_PMPORT) {
1147		sd->satadev_type = SATA_DTYPE_NONE;
1148		sd->satadev_state = SATA_STATE_UNKNOWN;
1149		mutex_exit(&nvp->nvp_mutex);
1150		nv_cmn_err(CE_WARN, nvc, nvp,
1151		    "controller does not support port multiplier");
1152
1153		return (SATA_FAILURE);
1154	}
1155
1156	sd->satadev_state = SATA_PSTATE_PWRON;
1157
1158	nv_copy_registers(nvp, sd, NULL);
1159
1160	/*
1161	 * determine link status
1162	 */
1163	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
1164		uint8_t det;
1165
1166		/*
1167		 * Reset will cause the link to go down for a short period of
1168		 * time.  If the link is lost for less than 2 seconds, ignore it
1169		 * so that the reset can progress.
1170		 */
1171		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
1172
1173			if (nvp->nvp_link_lost_time == 0) {
1174				nvp->nvp_link_lost_time = nv_lbolt;
1175			}
1176
1177			if (TICK_TO_SEC(nv_lbolt -
1178			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
1179				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
1180				    "probe: intermittent link lost while"
1181				    " resetting"));
1182				/*
1183				 * fake status of link so that probe continues
1184				 */
1185				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1186				    SSTATUS_IPM_ACTIVE);
1187				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1188				    SSTATUS_DET_DEVPRE_PHYCOM);
1189				sd->satadev_type = SATA_DTYPE_UNKNOWN;
1190				mutex_exit(&nvp->nvp_mutex);
1191
1192				return (SATA_SUCCESS);
1193			} else {
1194				nvp->nvp_state &=
1195				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1196			}
1197		}
1198
1199		/*
1200		 * no link, so tear down port and abort all active packets
1201		 */
1202
1203		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
1204		    SSTATUS_DET_SHIFT;
1205
1206		switch (det) {
1207		case SSTATUS_DET_NODEV:
1208		case SSTATUS_DET_PHYOFFLINE:
1209			sd->satadev_type = SATA_DTYPE_NONE;
1210			break;
1211		default:
1212			sd->satadev_type = SATA_DTYPE_UNKNOWN;
1213			break;
1214		}
1215
1216		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1217		    "probe: link lost invoking nv_abort_active"));
1218
1219		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
1220		nv_uninit_port(nvp);
1221
1222		mutex_exit(&nvp->nvp_mutex);
1223
1224		return (SATA_SUCCESS);
1225	} else {
1226		nvp->nvp_link_lost_time = 0;
1227	}
1228
1229	/*
1230	 * A device is present so clear hotremoved flag
1231	 */
1232	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
1233
1234#ifdef SGPIO_SUPPORT
1235	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1236	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1237#endif
1238
1239	/*
1240	 * If the signature was acquired previously there is no need to
1241	 * do it again.
1242	 */
1243	if (nvp->nvp_signature != 0) {
1244		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1245		    "probe: signature acquired previously"));
1246		sd->satadev_type = nvp->nvp_type;
1247		mutex_exit(&nvp->nvp_mutex);
1248
1249		return (SATA_SUCCESS);
1250	}
1251
1252	/*
1253	 * If NV_PORT_RESET is not set, this is the first time through
1254	 * so perform reset and return.
1255	 */
1256	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
1257		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
1258		    "probe: first reset to get sig"));
1259		nvp->nvp_state |= NV_PORT_RESET_PROBE;
1260		nv_reset(nvp);
1261		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1262		nvp->nvp_probe_time = nv_lbolt;
1263		mutex_exit(&nvp->nvp_mutex);
1264
1265		return (SATA_SUCCESS);
1266	}
1267
1268	/*
1269	 * Reset was done previously.  see if the signature is
1270	 * available.
1271	 */
1272	nv_read_signature(nvp);
1273	sd->satadev_type = nvp->nvp_type;
1274
1275	/*
1276	 * Some drives may require additional resets to get a
1277	 * valid signature.  If a drive was not just powered up, the signature
1278	 * should arrive within half a second of reset.  Therefore if more
1279	 * than 5 seconds has elapsed while waiting for a signature, reset
1280	 * again.  These extra resets do not appear to create problems when
1281	 * the drive is spinning up for more than this reset period.
1282	 */
1283	if (nvp->nvp_signature == 0) {
1284		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1285			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1286			    " during signature acquisition"));
1287			nv_reset(nvp);
1288		}
1289
1290		mutex_exit(&nvp->nvp_mutex);
1291
1292		return (SATA_SUCCESS);
1293	}
1294
1295	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1296	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1297
1298	/*
1299	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1300	 * it is not either of those, then just return.
1301	 */
1302	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1303	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1304		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1305		    " disks/CDs/DVDs.  Signature acquired was %X",
1306		    nvp->nvp_signature));
1307		mutex_exit(&nvp->nvp_mutex);
1308
1309		return (SATA_SUCCESS);
1310	}
1311
1312	/*
1313	 * make sure structures are initialized
1314	 */
1315	if (nv_init_port(nvp) == NV_SUCCESS) {
1316		NVLOG((NVDBG_PROBE, nvc, nvp,
1317		    "device detected and set up at port %d", cport));
1318		mutex_exit(&nvp->nvp_mutex);
1319
1320		return (SATA_SUCCESS);
1321	} else {
1322		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1323		    "structures for port %d", cport);
1324		mutex_exit(&nvp->nvp_mutex);
1325
1326		return (SATA_FAILURE);
1327	}
1328	/*NOTREACHED*/
1329}
1330
1331
1332/*
1333 * Called by sata module to start a new command.
1334 */
1335static int
1336nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1337{
1338	int cport = spkt->satapkt_device.satadev_addr.cport;
1339	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1340	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1341	int ret;
1342
1343	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1344	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1345
1346	mutex_enter(&nvp->nvp_mutex);
1347
1348	/*
1349	 * hotremoved is an intermediate state where the link was lost,
1350	 * but the hotplug event has not yet been processed by the sata
1351	 * module.  Fail the request.
1352	 */
1353	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1354		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1355		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1356		NVLOG((NVDBG_ERRS, nvc, nvp,
1357		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1358		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1359		mutex_exit(&nvp->nvp_mutex);
1360
1361		return (SATA_TRAN_PORT_ERROR);
1362	}
1363
1364	if (nvp->nvp_state & NV_PORT_RESET) {
1365		NVLOG((NVDBG_ERRS, nvc, nvp,
1366		    "still waiting for reset completion"));
1367		spkt->satapkt_reason = SATA_PKT_BUSY;
1368		mutex_exit(&nvp->nvp_mutex);
1369
1370		/*
1371		 * If in panic, timeouts do not occur, so fake one
1372		 * so that the signature can be acquired to complete
1373		 * the reset handling.
1374		 */
1375		if (ddi_in_panic()) {
1376			nv_timeout(nvp);
1377		}
1378
1379		return (SATA_TRAN_BUSY);
1380	}
1381
1382	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1383		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1384		NVLOG((NVDBG_ERRS, nvc, nvp,
1385		    "nv_sata_start: SATA_DTYPE_NONE"));
1386		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1387		mutex_exit(&nvp->nvp_mutex);
1388
1389		return (SATA_TRAN_PORT_ERROR);
1390	}
1391
1392	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1393		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1394		nv_cmn_err(CE_WARN, nvc, nvp,
1395		    "port multipliers not supported by controller");
1396		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1397		mutex_exit(&nvp->nvp_mutex);
1398
1399		return (SATA_TRAN_CMD_UNSUPPORTED);
1400	}
1401
1402	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1403		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1404		NVLOG((NVDBG_ERRS, nvc, nvp,
1405		    "nv_sata_start: port not yet initialized"));
1406		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1407		mutex_exit(&nvp->nvp_mutex);
1408
1409		return (SATA_TRAN_PORT_ERROR);
1410	}
1411
1412	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1413		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1414		NVLOG((NVDBG_ERRS, nvc, nvp,
1415		    "nv_sata_start: NV_PORT_INACTIVE"));
1416		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1417		mutex_exit(&nvp->nvp_mutex);
1418
1419		return (SATA_TRAN_PORT_ERROR);
1420	}
1421
1422	if (nvp->nvp_state & NV_PORT_FAILED) {
1423		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1424		NVLOG((NVDBG_ERRS, nvc, nvp,
1425		    "nv_sata_start: NV_PORT_FAILED state"));
1426		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1427		mutex_exit(&nvp->nvp_mutex);
1428
1429		return (SATA_TRAN_PORT_ERROR);
1430	}
1431
1432	/*
1433	 * After a device reset, once the sata module's restore processing
1434	 * is complete, the sata module will set sata_clear_dev_reset, which
1435	 * indicates that restore processing has completed and normal
1436	 * non-restore related commands should be processed.
1437	 */
1438	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1439		nvp->nvp_state &= ~NV_PORT_RESTORE;
1440		NVLOG((NVDBG_ENTRY, nvc, nvp,
1441		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1442	}
1443
1444	/*
1445	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1446	 * only allow commands which restore device state.  The sata module
1447	 * marks such commands with sata_ignore_dev_reset.
1448	 *
1449	 * During coredump, nv_reset is called, but then the restore
1450	 * doesn't happen.  For now, work around this by ignoring the wait for
1451	 * restore if the system is panicking.
1452	 */
1453	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1454	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1455	    (ddi_in_panic() == 0)) {
1456		spkt->satapkt_reason = SATA_PKT_BUSY;
1457		NVLOG((NVDBG_ENTRY, nvc, nvp,
1458		    "nv_sata_start: waiting for restore "));
1459		mutex_exit(&nvp->nvp_mutex);
1460
1461		return (SATA_TRAN_BUSY);
1462	}
1463
1464	if (nvp->nvp_state & NV_PORT_ABORTING) {
1465		spkt->satapkt_reason = SATA_PKT_BUSY;
1466		NVLOG((NVDBG_ERRS, nvc, nvp,
1467		    "nv_sata_start: NV_PORT_ABORTING"));
1468		mutex_exit(&nvp->nvp_mutex);
1469
1470		return (SATA_TRAN_BUSY);
1471	}
1472
1473	if (spkt->satapkt_op_mode &
1474	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1475
1476		ret = nv_start_sync(nvp, spkt);
1477
1478		mutex_exit(&nvp->nvp_mutex);
1479
1480		return (ret);
1481	}
1482
1483	/*
1484	 * start the command asynchronously
1485	 */
1486	ret = nv_start_async(nvp, spkt);
1487
1488	mutex_exit(&nvp->nvp_mutex);
1489
1490	return (ret);
1491}
1492
1493
1494/*
1495 * SATA_OPMODE_POLLING implies the driver is in a
1496 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1497 * If only SATA_OPMODE_SYNCH is set, the driver can use
1498 * interrupts and sleep wait on a cv.
1499 *
1500 * If SATA_OPMODE_POLLING is set, the driver can't use
1501 * interrupts and must busy wait and simulate the
1502 * interrupts by waiting for BSY to be cleared.
1503 *
1504 * Synchronous mode has to return BUSY if there are
1505 * any other commands already on the drive.
1506 */
1507static int
1508nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1509{
1510	nv_ctl_t *nvc = nvp->nvp_ctlp;
1511	int ret;
1512
1513	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1514
1515	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1516		spkt->satapkt_reason = SATA_PKT_BUSY;
1517		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1518		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1519		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1520		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1521		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1522
1523		return (SATA_TRAN_BUSY);
1524	}
1525
1526	/*
1527	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1528	 */
1529	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1530	    servicing_interrupt()) {
1531		spkt->satapkt_reason = SATA_PKT_BUSY;
1532		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1533		    "SYNC mode not allowed during interrupt"));
1534
1535		return (SATA_TRAN_BUSY);
1536
1537	}
1538
1539	/*
1540	 * disable interrupt generation if in polled mode
1541	 */
1542	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1543		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1544	}
1545
1546	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1547		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1548			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1549		}
1550
1551		return (ret);
1552	}
1553
1554	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1555		mutex_exit(&nvp->nvp_mutex);
1556		ret = nv_poll_wait(nvp, spkt);
1557		mutex_enter(&nvp->nvp_mutex);
1558
1559		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1560
1561		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1562		    " done: reason %d", ret));
1563
1564		return (ret);
1565	}
1566
1567	/*
1568	 * non-polling synchronous mode handling.  The interrupt will signal
1569	 * when the IO is completed.
1570	 */
1571	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1572
1573	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1574
1575		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1576	}
1577
1578	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1579	    " done: reason %d", spkt->satapkt_reason));
1580
1581	return (SATA_TRAN_ACCEPTED);
1582}
1583
1584
1585static int
1586nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1587{
1588	int ret;
1589	nv_ctl_t *nvc = nvp->nvp_ctlp;
1590#if ! defined(__lock_lint)
1591	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1592#endif
1593
1594	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1595
1596	for (;;) {
1597
1598		NV_DELAY_NSEC(400);
1599
1600		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1601		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1602		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1603			mutex_enter(&nvp->nvp_mutex);
1604			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1605			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1606			nv_reset(nvp);
1607			nv_complete_io(nvp, spkt, 0);
1608			mutex_exit(&nvp->nvp_mutex);
1609			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1610			    "SATA_STATUS_BSY"));
1611
1612			return (SATA_TRAN_ACCEPTED);
1613		}
1614
1615		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1616
1617		/*
1618		 * Simulate interrupt.
1619		 */
1620		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1621		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1622
1623		if (ret != DDI_INTR_CLAIMED) {
1624			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1625			    " unclaimed -- resetting"));
1626			mutex_enter(&nvp->nvp_mutex);
1627			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1628			nv_reset(nvp);
1629			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1630			nv_complete_io(nvp, spkt, 0);
1631			mutex_exit(&nvp->nvp_mutex);
1632
1633			return (SATA_TRAN_ACCEPTED);
1634		}
1635
1636#if ! defined(__lock_lint)
1637		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1638			/*
1639			 * packet is complete
1640			 */
1641			return (SATA_TRAN_ACCEPTED);
1642		}
1643#endif
1644	}
1645	/*NOTREACHED*/
1646}
1647
1648
1649/*
1650 * Called by sata module to abort outstanding packets.
1651 */
1652/*ARGSUSED*/
1653static int
1654nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1655{
1656	int cport = spkt->satapkt_device.satadev_addr.cport;
1657	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1658	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1659	int c_a, ret;
1660
1661	ASSERT(cport < NV_MAX_PORTS(nvc));
1662	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1663
1664	mutex_enter(&nvp->nvp_mutex);
1665
1666	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1667		mutex_exit(&nvp->nvp_mutex);
1668		nv_cmn_err(CE_WARN, nvc, nvp,
1669		    "abort request failed: port inactive");
1670
1671		return (SATA_FAILURE);
1672	}
1673
1674	/*
1675 * if spkt == NULL, abort all commands
1676	 */
1677	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1678
1679	if (c_a) {
1680		NVLOG((NVDBG_ENTRY, nvc, nvp,
1681		    "packets aborted running=%d", c_a));
1682		ret = SATA_SUCCESS;
1683	} else {
1684		if (spkt == NULL) {
1685			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1686		} else {
1687			NVLOG((NVDBG_ENTRY, nvc, nvp,
1688			    "can't find spkt to abort"));
1689		}
1690		ret = SATA_FAILURE;
1691	}
1692
1693	mutex_exit(&nvp->nvp_mutex);
1694
1695	return (ret);
1696}
1697
1698
1699/*
1700 * if spkt == NULL abort all pkts running, otherwise
1701 * abort the requested packet.  Must be called with nvp_mutex
1702 * held and returns with it held.  Not NCQ aware.
1703 */
1704static int
1705nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1706{
1707	int aborted = 0, i, reset_once = B_FALSE;
1708	struct nv_slot *nv_slotp;
1709	sata_pkt_t *spkt_slot;
1710
1711	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1712
1713	/*
1714	 * return if the port is not configured
1715	 */
1716	if (nvp->nvp_slot == NULL) {
1717		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1718		    "nv_abort_active: not configured so returning"));
1719
1720		return (0);
1721	}
1722
1723	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1724
1725	nvp->nvp_state |= NV_PORT_ABORTING;
1726
1727	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1728
1729		nv_slotp = &(nvp->nvp_slot[i]);
1730		spkt_slot = nv_slotp->nvslot_spkt;
1731
1732		/*
1733		 * skip if there is no active command in this slot
1734		 */
1735		if (spkt_slot == NULL) {
1736			continue;
1737		}
1738
1739		/*
1740		 * if a specific packet was requested, skip if
1741		 * this is not a match
1742		 */
1743		if ((spkt != NULL) && (spkt != spkt_slot)) {
1744			continue;
1745		}
1746
1747		/*
1748		 * stop the hardware.  This could need reworking
1749		 * when NCQ is enabled in the driver.
1750		 */
1751		if (reset_once == B_FALSE) {
1752			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1753
1754			/*
1755			 * stop DMA engine
1756			 */
1757			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1758
1759			nv_reset(nvp);
1760			reset_once = B_TRUE;
1761		}
1762
1763		spkt_slot->satapkt_reason = abort_reason;
1764		nv_complete_io(nvp, spkt_slot, i);
1765		aborted++;
1766	}
1767
1768	nvp->nvp_state &= ~NV_PORT_ABORTING;
1769
1770	return (aborted);
1771}
1772
1773
1774/*
1775 * Called by sata module to reset a port, device, or the controller.
1776 */
1777static int
1778nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1779{
1780	int cport = sd->satadev_addr.cport;
1781	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1782	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1783	int ret = SATA_SUCCESS;
1784
1785	ASSERT(cport < NV_MAX_PORTS(nvc));
1786
1787	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1788
1789	mutex_enter(&nvp->nvp_mutex);
1790
1791	switch (sd->satadev_addr.qual) {
1792
1793	case SATA_ADDR_CPORT:
1794		/*FALLTHROUGH*/
1795	case SATA_ADDR_DCPORT:
1796		nv_reset(nvp);
1797		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1798
1799		break;
1800	case SATA_ADDR_CNTRL:
1801		NVLOG((NVDBG_ENTRY, nvc, nvp,
1802		    "nv_sata_reset: controller reset not supported"));
1803
1804		break;
1805	case SATA_ADDR_PMPORT:
1806	case SATA_ADDR_DPMPORT:
1807		NVLOG((NVDBG_ENTRY, nvc, nvp,
1808		    "nv_sata_reset: port multipliers not supported"));
1809		/*FALLTHROUGH*/
1810	default:
1811		/*
1812		 * unsupported case
1813		 */
1814		ret = SATA_FAILURE;
1815		break;
1816	}
1817
1818	if (ret == SATA_SUCCESS) {
1819		/*
1820		 * If the port is inactive, do a quiet reset and don't attempt
1821		 * to wait for reset completion or do any post reset processing
1822		 */
1823		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1824			nvp->nvp_state &= ~NV_PORT_RESET;
1825			nvp->nvp_reset_time = 0;
1826		}
1827
1828		/*
1829		 * clear the port failed flag
1830		 */
1831		nvp->nvp_state &= ~NV_PORT_FAILED;
1832	}
1833
1834	mutex_exit(&nvp->nvp_mutex);
1835
1836	return (ret);
1837}
1838
1839
1840/*
1841 * Sata entry point to handle port activation.  cfgadm -c connect
1842 */
1843static int
1844nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1845{
1846	int cport = sd->satadev_addr.cport;
1847	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1848	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1849
1850	ASSERT(cport < NV_MAX_PORTS(nvc));
1851	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1852
1853	mutex_enter(&nvp->nvp_mutex);
1854
1855	sd->satadev_state = SATA_STATE_READY;
1856
1857	nv_copy_registers(nvp, sd, NULL);
1858
1859	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1860
1861	nvp->nvp_state = 0;
1862
1863	mutex_exit(&nvp->nvp_mutex);
1864
1865	return (SATA_SUCCESS);
1866}
1867
1868
1869/*
1870 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1871 */
1872static int
1873nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1874{
1875	int cport = sd->satadev_addr.cport;
1876	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1877	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1878
1879	ASSERT(cport < NV_MAX_PORTS(nvc));
1880	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1881
1882	mutex_enter(&nvp->nvp_mutex);
1883
1884	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1885
1886	/*
1887	 * mark the device as inaccessible
1888	 */
1889	nvp->nvp_state |= NV_PORT_INACTIVE;
1890
1891	/*
1892	 * disable the interrupts on port
1893	 */
1894	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1895
1896	nv_uninit_port(nvp);
1897
1898	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1899	nv_copy_registers(nvp, sd, NULL);
1900
1901	mutex_exit(&nvp->nvp_mutex);
1902
1903	return (SATA_SUCCESS);
1904}
1905
1906
1907/*
1908 * find an empty slot in the driver's queue, increment counters,
1909 * and then invoke the appropriate PIO or DMA start routine.
1910 */
1911static int
1912nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1913{
1914	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1915	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1916	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1917	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1918	nv_ctl_t *nvc = nvp->nvp_ctlp;
1919	nv_slot_t *nv_slotp;
1920	boolean_t dma_cmd;
1921
1922	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1923	    sata_cmdp->satacmd_cmd_reg));
1924
1925	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1926	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1927		nvp->nvp_ncq_run++;
1928		/*
1929		 * search for an empty NCQ slot.  By this time, it's already
1930		 * been determined by the caller that there is room on the
1931		 * queue.
1932		 */
1933		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1934		    on_bit <<= 1) {
1935			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1936				break;
1937			}
1938		}
1939
1940		/*
1941		 * the first empty slot found should not exceed the queue
1942		 * depth of the drive.  If it does, it's an error.
1943		 */
1944		ASSERT(slot != nvp->nvp_queue_depth);
1945
1946		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1947		    nvp->nvp_sactive);
1948		ASSERT((sactive & on_bit) == 0);
1949		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1950		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1951		    on_bit));
1952		nvp->nvp_sactive_cache |= on_bit;
1953
1954		ncq = NVSLOT_NCQ;
1955
1956	} else {
1957		nvp->nvp_non_ncq_run++;
1958		slot = 0;
1959	}
1960
1961	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1962
1963	ASSERT(nv_slotp->nvslot_spkt == NULL);
1964
1965	nv_slotp->nvslot_spkt = spkt;
1966	nv_slotp->nvslot_flags = ncq;
1967
1968	/*
1969	 * the sata module doesn't indicate which commands utilize the
1970	 * DMA engine, so find out using this switch table.
1971	 */
1972	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1973	case SATAC_READ_DMA_EXT:
1974	case SATAC_WRITE_DMA_EXT:
1975	case SATAC_WRITE_DMA:
1976	case SATAC_READ_DMA:
1977	case SATAC_READ_DMA_QUEUED:
1978	case SATAC_READ_DMA_QUEUED_EXT:
1979	case SATAC_WRITE_DMA_QUEUED:
1980	case SATAC_WRITE_DMA_QUEUED_EXT:
1981	case SATAC_READ_FPDMA_QUEUED:
1982	case SATAC_WRITE_FPDMA_QUEUED:
1983		dma_cmd = B_TRUE;
1984		break;
1985	default:
1986		dma_cmd = B_FALSE;
1987	}
1988
1989	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1990		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1991		nv_slotp->nvslot_start = nv_start_dma;
1992		nv_slotp->nvslot_intr = nv_intr_dma;
1993	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1994		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1995		nv_slotp->nvslot_start = nv_start_pkt_pio;
1996		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1997		if ((direction == SATA_DIR_READ) ||
1998		    (direction == SATA_DIR_WRITE)) {
1999			nv_slotp->nvslot_byte_count =
2000			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2001			nv_slotp->nvslot_v_addr =
2002			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2003			/*
2004			 * Freeing DMA resources allocated by the framework
2005			 * now to avoid buffer overwrite (dma sync) problems
2006			 * when the buffer is released at command completion.
2007			 * Primarily an issue on systems with more than
2008			 * 4GB of memory.
2009			 */
2010			sata_free_dma_resources(spkt);
2011		}
2012	} else if (direction == SATA_DIR_NODATA_XFER) {
2013		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2014		nv_slotp->nvslot_start = nv_start_nodata;
2015		nv_slotp->nvslot_intr = nv_intr_nodata;
2016	} else if (direction == SATA_DIR_READ) {
2017		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2018		nv_slotp->nvslot_start = nv_start_pio_in;
2019		nv_slotp->nvslot_intr = nv_intr_pio_in;
2020		nv_slotp->nvslot_byte_count =
2021		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2022		nv_slotp->nvslot_v_addr =
2023		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2024		/*
2025		 * Freeing DMA resources allocated by the framework now to
2026		 * avoid buffer overwrite (dma sync) problems when the buffer
2027		 * is released at command completion.  This is not an issue
2028		 * for write because write does not update the buffer.
2029		 * Primarily an issue on systems with more than 4GB of memory.
2030		 */
2031		sata_free_dma_resources(spkt);
2032	} else if (direction == SATA_DIR_WRITE) {
2033		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2034		nv_slotp->nvslot_start = nv_start_pio_out;
2035		nv_slotp->nvslot_intr = nv_intr_pio_out;
2036		nv_slotp->nvslot_byte_count =
2037		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2038		nv_slotp->nvslot_v_addr =
2039		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2040	} else {
2041		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2042		    " %d cookies %d cmd %x",
2043		    sata_cmdp->satacmd_flags.sata_data_direction,
2044		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2045		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2046		ret = SATA_TRAN_CMD_UNSUPPORTED;
2047
2048		goto fail;
2049	}
2050
2051	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2052	    SATA_TRAN_ACCEPTED) {
2053#ifdef SGPIO_SUPPORT
2054		nv_sgp_drive_active(nvp->nvp_ctlp,
2055		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2056#endif
2057		nv_slotp->nvslot_stime = ddi_get_lbolt();
2058
2059		/*
2060		 * start timer if it's not already running and this packet
2061		 * is not requesting polled mode.
2062		 */
2063		if ((nvp->nvp_timeout_id == 0) &&
2064		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2065			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2066			    drv_usectohz(NV_ONE_SEC));
2067		}
2068
2069		return (SATA_TRAN_ACCEPTED);
2070	}
2071
2072	fail:
2073
2074	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2075
2076	if (ncq == NVSLOT_NCQ) {
2077		nvp->nvp_ncq_run--;
2078		nvp->nvp_sactive_cache &= ~on_bit;
2079	} else {
2080		nvp->nvp_non_ncq_run--;
2081	}
2082	nv_slotp->nvslot_spkt = NULL;
2083	nv_slotp->nvslot_flags = 0;
2084
2085	return (ret);
2086}
2087
2088
2089/*
2090 * Check if the signature is ready and if non-zero translate
2091 * it into a solaris sata defined type.
2092 */
2093static void
2094nv_read_signature(nv_port_t *nvp)
2095{
2096	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2097
2098	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2099	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2100	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2101	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2102
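	/*
	 * Note: the NV_SIG_* values below are expected to correspond to the
	 * standard SATA device signatures (0x00000101 for a disk, 0xEB140101
	 * for an ATAPI device and 0x96690101 for a port multiplier); the
	 * actual values come from nv_sata.h.
	 */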
2103	switch (nvp->nvp_signature) {
2104
2105	case NV_SIG_DISK:
2106		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2107		nvp->nvp_type = SATA_DTYPE_ATADISK;
2108		break;
2109	case NV_SIG_ATAPI:
2110		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2111		    "drive is an optical device"));
2112		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2113		break;
2114	case NV_SIG_PM:
2115		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2116		    "device is a port multiplier"));
2117		nvp->nvp_type = SATA_DTYPE_PMULT;
2118		break;
2119	case NV_SIG_NOTREADY:
2120		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2121		    "signature not ready"));
2122		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2123		break;
2124	default:
2125		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2126		    " recognized", nvp->nvp_signature);
2127		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2128		break;
2129	}
2130
2131	if (nvp->nvp_signature) {
2132		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
2133	}
2134}
2135
2136
2137/*
2138 * Reset the port
2139 */
2140static void
2141nv_reset(nv_port_t *nvp)
2142{
2143	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2144	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2145	nv_ctl_t *nvc = nvp->nvp_ctlp;
2146	uint32_t sctrl;
2147
2148	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
2149
2150	ASSERT(mutex_owned(&nvp->nvp_mutex));
2151
2152	/*
2153	 * clear signature registers
2154	 */
2155	nv_put8(cmdhdl, nvp->nvp_sect, 0);
2156	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2157	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2158	nv_put8(cmdhdl, nvp->nvp_count, 0);
2159
2160	nvp->nvp_signature = 0;
2161	nvp->nvp_type = 0;
2162	nvp->nvp_state |= NV_PORT_RESET;
2163	nvp->nvp_reset_time = ddi_get_lbolt();
2164	nvp->nvp_link_lost_time = 0;
2165
2166	/*
	 * assert reset in the PHY by setting SControl DET to 1 (COMRESET)
2168	 */
2169	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2170
2171	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
2172
2173	/*
	 * hold COMRESET asserted for 1ms, the minimum required by the spec
2175	 */
2176	drv_usecwait(1000);
2177
2178	/*
2179	 * de-assert reset in PHY
2180	 */
2181	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
2182
2183	/*
2184	 * make sure timer is running
2185	 */
2186	if (nvp->nvp_timeout_id == 0) {
2187		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2188		    drv_usectohz(NV_ONE_SEC));
2189	}
2190}
2191
2192
2193/*
2194 * Initialize register handling specific to mcp51/mcp55
2195 */
2196/* ARGSUSED */
2197static void
2198mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2199{
2200	nv_port_t *nvp;
2201	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2202	uint8_t off, port;
2203
2204	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2205	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2206
2207	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2208		nvp = &(nvc->nvc_port[port]);
2209		nvp->nvp_mcp5x_int_status =
2210		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2211		nvp->nvp_mcp5x_int_ctl =
2212		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2213
2214		/*
2215		 * clear any previous interrupts asserted
2216		 */
2217		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2218		    MCP5X_INT_CLEAR);
2219
2220		/*
2221		 * These are the interrupts to accept for now.  The spec
2222		 * says these are enable bits, but nvidia has indicated
2223		 * these are masking bits.  Even though they may be masked
2224		 * out to prevent asserting the main interrupt, they can
2225		 * still be asserted while reading the interrupt status
2226		 * register, so that needs to be considered in the interrupt
2227		 * handler.
2228		 */
2229		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2230		    ~(MCP5X_INT_IGNORE));
2231	}
2232
2233	/*
2234	 * Allow the driver to program the BM on the first command instead
2235	 * of waiting for an interrupt.
2236	 */
#ifdef NCQ
	{
		uint32_t flags;

		flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD |
		    MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
		flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
	}
#endif
2243
2244	/*
	 * mcp55 rev A03 and above support 40-bit physical addressing.
	 * Enable DMA to take advantage of that.
2248	 */
2249	if (nvc->nvc_revid >= 0xa3) {
2250		if (nv_sata_40bit_dma == B_TRUE) {
2251			uint32_t reg32;
2252			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2253			    "rev id is %X and"
2254			    " is capable of 40-bit DMA addressing",
2255			    nvc->nvc_revid));
2256			nvc->dma_40bit = B_TRUE;
2257			reg32 = pci_config_get32(pci_conf_handle,
2258			    NV_SATA_CFG_20);
2259			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
			    reg32 | NV_40BIT_PRD);
2261		} else {
2262			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2263			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2264		}
2265	} else {
2266		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2267		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2268	}
2269}
2270
2271
2272/*
2273 * Initialize register handling specific to ck804
2274 */
2275static void
2276ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2277{
2278	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2279	uint32_t reg32;
2280	uint16_t reg16;
2281	nv_port_t *nvp;
2282	int j;
2283
2284	/*
2285	 * delay hotplug interrupts until PHYRDY.
2286	 */
2287	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2288	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2289	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2290
2291	/*
2292	 * enable hot plug interrupts for channel x and y
2293	 */
2294	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2295	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2296	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2297	    NV_HIRQ_EN | reg16);
2298
2299
2300	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2301	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2302	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2303	    NV_HIRQ_EN | reg16);
2304
2305	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2306
2307	/*
2308	 * clear any existing interrupt pending then enable
2309	 */
2310	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2311		nvp = &(nvc->nvc_port[j]);
2312		mutex_enter(&nvp->nvp_mutex);
2313		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2314		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2315		mutex_exit(&nvp->nvp_mutex);
2316	}
2317}
2318
2319
2320/*
2321 * Initialize the controller and set up driver data structures.
2322 * determine if ck804 or mcp5x class.
2323 */
2324static int
2325nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2326{
2327	struct sata_hba_tran stran;
2328	nv_port_t *nvp;
2329	int j, ck804;
2330	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2331	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2332	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2333	uint32_t reg32;
2334	uint8_t reg8, reg8_save;
2335
2336	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2337
2338	ck804 = B_TRUE;
2339#ifdef SGPIO_SUPPORT
2340	nvc->nvc_mcp5x_flag = B_FALSE;
2341#endif
2342
2343	/*
2344	 * Need to set bit 2 to 1 at config offset 0x50
2345	 * to enable access to the bar5 registers.
2346	 */
2347	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2348	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2349	    reg32 | NV_BAR5_SPACE_EN);
2350
2351	/*
	 * Determine if this is ck804 or mcp5x.  ck804 maps the task file
	 * registers into bar5 while mcp5x does not, so the corresponding
	 * offset in mcp5x's register space is unused.  Check whether one of
	 * the task file registers is writable and reads back what was
	 * written: ck804 returns the value written, while mcp5x does not.
2358	 */
2359	reg8_save = nv_get8(bar5_hdl,
2360	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2361
2362
2363	for (j = 1; j < 3; j++) {
2364
2365		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2366		reg8 = nv_get8(bar5_hdl,
2367		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2368
2369		if (reg8 != j) {
2370			ck804 = B_FALSE;
2371			nvc->nvc_mcp5x_flag = B_TRUE;
2372			break;
2373		}
2374	}
2375
2376	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2377
2378	if (ck804 == B_TRUE) {
2379		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2380		nvc->nvc_interrupt = ck804_intr;
2381		nvc->nvc_reg_init = ck804_reg_init;
2382		nvc->nvc_set_intr = ck804_set_intr;
2383	} else {
2384		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2385		nvc->nvc_interrupt = mcp5x_intr;
2386		nvc->nvc_reg_init = mcp5x_reg_init;
2387		nvc->nvc_set_intr = mcp5x_set_intr;
2388	}
2389
2390
2391	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2392	stran.sata_tran_hba_dip = nvc->nvc_dip;
2393	if (nvc->dma_40bit == B_TRUE)
2394		stran.sata_tran_hba_dma_attr = &buffer_dma_40bit_attr;
2395	else
2396		stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2397	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2398	stran.sata_tran_hba_features_support =
2399	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2400	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2401	stran.sata_tran_probe_port = nv_sata_probe;
2402	stran.sata_tran_start = nv_sata_start;
2403	stran.sata_tran_abort = nv_sata_abort;
2404	stran.sata_tran_reset_dport = nv_sata_reset;
2405	stran.sata_tran_selftest = NULL;
2406	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2407	stran.sata_tran_pwrmgt_ops = NULL;
2408	stran.sata_tran_ioctl = NULL;
2409	nvc->nvc_sata_hba_tran = stran;
2410
2411	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2412	    KM_SLEEP);
2413
2414	/*
2415	 * initialize registers common to all chipsets
2416	 */
2417	nv_common_reg_init(nvc);
2418
2419	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2420		nvp = &(nvc->nvc_port[j]);
2421
2422		cmd_addr = nvp->nvp_cmd_addr;
2423		ctl_addr = nvp->nvp_ctl_addr;
2424		bm_addr = nvp->nvp_bm_addr;
2425
2426		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2427		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2428
2429		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2430
2431		nvp->nvp_data	= cmd_addr + NV_DATA;
2432		nvp->nvp_error	= cmd_addr + NV_ERROR;
2433		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2434		nvp->nvp_count	= cmd_addr + NV_COUNT;
2435		nvp->nvp_sect	= cmd_addr + NV_SECT;
2436		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2437		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2438		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2439		nvp->nvp_status	= cmd_addr + NV_STATUS;
2440		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2441		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2442		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2443
2444		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2445		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2446		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2447
2448		nvp->nvp_state = 0;
2449	}
2450
2451	/*
2452	 * initialize register by calling chip specific reg initialization
2453	 */
2454	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2455
2456	return (NV_SUCCESS);
2457}
2458
2459
2460/*
2461 * Initialize data structures with enough slots to handle queuing, if
2462 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2463 * NCQ support is built into the driver and enabled.  It might have been
2464 * better to derive the true size from the drive itself, but the sata
2465 * module only sends down that information on the first NCQ command,
2466 * which means possibly re-sizing the structures on an interrupt stack,
2467 * making error handling more messy.  The easy way is to just allocate
2468 * all 32 slots, which is what most drives support anyway.
2469 */
2470static int
2471nv_init_port(nv_port_t *nvp)
2472{
2473	nv_ctl_t *nvc = nvp->nvp_ctlp;
2474	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2475	dev_info_t *dip = nvc->nvc_dip;
2476	ddi_device_acc_attr_t dev_attr;
2477	size_t buf_size;
2478	ddi_dma_cookie_t cookie;
2479	uint_t count;
2480	int rc, i;
2481
2482	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2483	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2484	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2485
2486	if (nvp->nvp_state & NV_PORT_INIT) {
2487		NVLOG((NVDBG_INIT, nvc, nvp,
2488		    "nv_init_port previously initialized"));
2489
2490		return (NV_SUCCESS);
2491	} else {
2492		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2493	}
2494
2495	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2496	    NV_QUEUE_SLOTS, KM_SLEEP);
2497
2498	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2499	    NV_QUEUE_SLOTS, KM_SLEEP);
2500
2501	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2502	    NV_QUEUE_SLOTS, KM_SLEEP);
2503
2504	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2505	    NV_QUEUE_SLOTS, KM_SLEEP);
2506
2507	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2508	    KM_SLEEP);
2509
2510	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2511
2512		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2513		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2514
2515		if (rc != DDI_SUCCESS) {
2516			nv_uninit_port(nvp);
2517
2518			return (NV_FAILURE);
2519		}
2520
2521		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2522		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2523		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2524		    &(nvp->nvp_sg_acc_hdl[i]));
2525
2526		if (rc != DDI_SUCCESS) {
2527			nv_uninit_port(nvp);
2528
2529			return (NV_FAILURE);
2530		}
2531
2532		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2533		    nvp->nvp_sg_addr[i], buf_size,
2534		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2535		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2536
2537		if (rc != DDI_DMA_MAPPED) {
2538			nv_uninit_port(nvp);
2539
2540			return (NV_FAILURE);
2541		}
2542
2543		ASSERT(count == 1);
2544		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2545
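		/*
		 * The PRD table itself must reside in the low 4GB of memory
		 * because the bus master PRD table pointer register
		 * (BMIDTPX) is only 32 bits wide, hence the assertion below.
		 */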
2546		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2547
2548		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2549	}
2550
2551	/*
2552	 * nvp_queue_depth represents the actual drive queue depth, not the
2553	 * number of slots allocated in the structures (which may be more).
2554	 * Actual queue depth is only learned after the first NCQ command, so
2555	 * initialize it to 1 for now.
2556	 */
2557	nvp->nvp_queue_depth = 1;
2558
2559	nvp->nvp_state |= NV_PORT_INIT;
2560
2561	return (NV_SUCCESS);
2562}
2563
2564
2565/*
2566 * Free dynamically allocated structures for port.
2567 */
2568static void
2569nv_uninit_port(nv_port_t *nvp)
2570{
2571	int i;
2572
2573	/*
2574	 * It is possible to reach here before a port has been initialized or
2575	 * after it has already been uninitialized.  Just return in that case.
2576	 */
2577	if (nvp->nvp_slot == NULL) {
2578
2579		return;
2580	}
2581
2582	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2583	    "nv_uninit_port uninitializing"));
2584
2585	nvp->nvp_type = SATA_DTYPE_NONE;
2586
2587	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2588		if (nvp->nvp_sg_paddr[i]) {
2589			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2590		}
2591
2592		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2593			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2594		}
2595
2596		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2597			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2598		}
2599	}
2600
2601	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2602	nvp->nvp_slot = NULL;
2603
2604	kmem_free(nvp->nvp_sg_dma_hdl,
2605	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2606	nvp->nvp_sg_dma_hdl = NULL;
2607
2608	kmem_free(nvp->nvp_sg_acc_hdl,
2609	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2610	nvp->nvp_sg_acc_hdl = NULL;
2611
2612	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2613	nvp->nvp_sg_addr = NULL;
2614
2615	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2616	nvp->nvp_sg_paddr = NULL;
2617
2618	nvp->nvp_state &= ~NV_PORT_INIT;
2619	nvp->nvp_signature = 0;
2620}
2621
2622
2623/*
2624 * Cache register offsets and access handles to frequently accessed registers
2625 * which are common to either chipset.
2626 */
2627static void
2628nv_common_reg_init(nv_ctl_t *nvc)
2629{
2630	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2631	uchar_t *bm_addr_offset, *sreg_offset;
2632	uint8_t bar, port;
2633	nv_port_t *nvp;
2634
2635	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2636		if (port == 0) {
2637			bar = NV_BAR_0;
2638			bm_addr_offset = 0;
2639			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2640		} else {
2641			bar = NV_BAR_2;
2642			bm_addr_offset = (uchar_t *)8;
2643			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2644		}
2645
2646		nvp = &(nvc->nvc_port[port]);
2647		nvp->nvp_ctlp = nvc;
2648		nvp->nvp_port_num = port;
2649		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2650
2651		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2652		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2653		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2654		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2655		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2656		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2657		    (long)bm_addr_offset;
2658
2659		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2660		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2661		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2662		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2663	}
2664}
2665
2666
2667static void
2668nv_uninit_ctl(nv_ctl_t *nvc)
2669{
2670	int port;
2671	nv_port_t *nvp;
2672
2673	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2674
2675	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2676		nvp = &(nvc->nvc_port[port]);
2677		mutex_enter(&nvp->nvp_mutex);
2678		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2679		nv_uninit_port(nvp);
2680		mutex_exit(&nvp->nvp_mutex);
2681		mutex_destroy(&nvp->nvp_mutex);
2682		cv_destroy(&nvp->nvp_poll_cv);
2683	}
2684
2685	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2686	nvc->nvc_port = NULL;
2687}
2688
2689
2690/*
2691 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2692 * that interrupts from other devices can be disregarded while dtracing.
2693 */
2694/* ARGSUSED */
2695static uint_t
2696ck804_intr(caddr_t arg1, caddr_t arg2)
2697{
2698	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2699	uint8_t intr_status;
2700	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2701
2702	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2703		return (DDI_INTR_UNCLAIMED);
2704
2705	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2706
2707	if (intr_status == 0) {
2708
2709		return (DDI_INTR_UNCLAIMED);
2710	}
2711
2712	ck804_intr_process(nvc, intr_status);
2713
2714	return (DDI_INTR_CLAIMED);
2715}
2716
2717
2718/*
2719 * Main interrupt handler for ck804.  handles normal device
2720 * interrupts as well as port hot plug and remove interrupts.
2721 *
2722 */
2723static void
2724ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2725{
2726
2727	int port, i;
2728	nv_port_t *nvp;
2729	nv_slot_t *nv_slotp;
2730	uchar_t	status;
2731	sata_pkt_t *spkt;
2732	uint8_t bmstatus, clear_bits;
2733	ddi_acc_handle_t bmhdl;
2734	int nvcleared = 0;
2735	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2736	uint32_t sstatus;
2737	int port_mask_hot[] = {
2738		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2739	};
2740	int port_mask_pm[] = {
2741		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2742	};
2743
2744	NVLOG((NVDBG_INTR, nvc, NULL,
2745	    "ck804_intr_process entered intr_status=%x", intr_status));
2746
2747	/*
	 * For a command completion interrupt, an explicit clear is not
	 * required; however, for the error cases an explicit clear is
	 * performed.
2750	 */
2751	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2752
2753		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
2754
2755		if ((port_mask[port] & intr_status) == 0) {
2756			continue;
2757		}
2758
2759		NVLOG((NVDBG_INTR, nvc, NULL,
2760		    "ck804_intr_process interrupt on port %d", port));
2761
2762		nvp = &(nvc->nvc_port[port]);
2763
2764		mutex_enter(&nvp->nvp_mutex);
2765
2766		/*
		 * there was a corner case found where an interrupt
		 * arrived before nvp_slot was set.  The cause should
		 * probably be tracked down and eliminated, at which
		 * point this check can be removed.
2772		 */
2773		if (nvp->nvp_slot == NULL) {
2774			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2775			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2776			    "received before initialization "
2777			    "completed status=%x", status));
2778			mutex_exit(&nvp->nvp_mutex);
2779
2780			/*
2781			 * clear interrupt bits
2782			 */
2783			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2784			    port_mask[port]);
2785
2786			continue;
2787		}
2788
2789		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2790			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2791			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    "no command in progress status=%x", status));
2793			mutex_exit(&nvp->nvp_mutex);
2794
2795			/*
2796			 * clear interrupt bits
2797			 */
2798			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2799			    port_mask[port]);
2800
2801			continue;
2802		}
2803
2804		bmhdl = nvp->nvp_bm_hdl;
2805		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2806
2807		if (!(bmstatus & BMISX_IDEINTS)) {
2808			mutex_exit(&nvp->nvp_mutex);
2809
2810			continue;
2811		}
2812
2813		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2814
2815		if (status & SATA_STATUS_BSY) {
2816			mutex_exit(&nvp->nvp_mutex);
2817
2818			continue;
2819		}
2820
2821		nv_slotp = &(nvp->nvp_slot[0]);
2822
2823		ASSERT(nv_slotp);
2824
2825		spkt = nv_slotp->nvslot_spkt;
2826
2827		if (spkt == NULL) {
2828			mutex_exit(&nvp->nvp_mutex);
2829
2830			continue;
2831		}
2832
2833		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2834
2835		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2836
2837		/*
		 * If there is no link, the completion of the packet cannot
		 * be known for certain, so abort it.
2840		 */
2841		if (nv_check_link((&spkt->satapkt_device)->
2842		    satadev_scr.sstatus) == B_FALSE) {
2843
2844			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2845
2846		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2847
2848			nv_complete_io(nvp, spkt, 0);
2849		}
2850
2851		mutex_exit(&nvp->nvp_mutex);
2852	}
2853
2854	/*
2855	 * ck804 often doesn't correctly distinguish hot add/remove
2856	 * interrupts.  Frequently both the ADD and the REMOVE bits
2857	 * are asserted, whether it was a remove or add.  Use sstatus
2858	 * to distinguish hot add from hot remove.
2859	 */
2860
2861	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2862		clear_bits = 0;
2863
2864		nvp = &(nvc->nvc_port[port]);
2865		mutex_enter(&nvp->nvp_mutex);
2866
2867		if ((port_mask_pm[port] & intr_status) != 0) {
2868			clear_bits = port_mask_pm[port];
2869			NVLOG((NVDBG_HOT, nvc, nvp,
2870			    "clearing PM interrupt bit: %x",
2871			    intr_status & port_mask_pm[port]));
2872		}
2873
2874		if ((port_mask_hot[port] & intr_status) == 0) {
2875			if (clear_bits != 0) {
2876				goto clear;
2877			} else {
2878				mutex_exit(&nvp->nvp_mutex);
2879				continue;
2880			}
2881		}
2882
2883		/*
2884		 * reaching here means there was a hot add or remove.
2885		 */
2886		clear_bits |= port_mask_hot[port];
2887
2888		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2889
2890		sstatus = nv_get32(bar5_hdl,
2891		    nvc->nvc_port[port].nvp_sstatus);
2892
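		/*
		 * A DET field indicating that a device is present and PHY
		 * communication is established is treated as a hot add;
		 * anything else is treated as a removal.
		 */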
2893		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2894		    SSTATUS_DET_DEVPRE_PHYCOM) {
2895			nv_report_add_remove(nvp, 0);
2896		} else {
2897			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2898		}
2899	clear:
2900		/*
2901		 * clear interrupt bits.  explicit interrupt clear is
2902		 * required for hotplug interrupts.
2903		 */
2904		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
2905
2906		/*
		 * make sure it's flushed and cleared.  If not, try
2908		 * again.  Sometimes it has been observed to not clear
2909		 * on the first try.
2910		 */
2911		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2912
2913		/*
2914		 * make 10 additional attempts to clear the interrupt
2915		 */
2916		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "intr_status=%x "
2918			    "still not clear try=%d", intr_status,
2919			    ++nvcleared));
2920			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
2921			    clear_bits);
2922			intr_status = nv_get8(bar5_hdl,
2923			    nvc->nvc_ck804_int_status);
2924		}
2925
2926		/*
2927		 * if still not clear, log a message and disable the
2928		 * port. highly unlikely that this path is taken, but it
2929		 * gives protection against a wedged interrupt.
2930		 */
2931		if (intr_status & clear_bits) {
2932			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2933			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2934			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2935			nvp->nvp_state |= NV_PORT_FAILED;
2936			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2937			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2938			    "interrupt.  disabling port intr_status=%X",
2939			    intr_status);
2940		}
2941
2942		mutex_exit(&nvp->nvp_mutex);
2943	}
2944}
2945
2946
2947/*
2948 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
2949 * on the controller, to handle completion and hot plug and remove events.
2950 *
2951 */
2952static uint_t
2953mcp5x_intr_port(nv_port_t *nvp)
2954{
2955	nv_ctl_t *nvc = nvp->nvp_ctlp;
2956	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2957	uint8_t clear = 0, intr_cycles = 0;
2958	int ret = DDI_INTR_UNCLAIMED;
2959	uint16_t int_status;
2960
2961	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port entered"));
2962
2963	for (;;) {
2964		/*
2965		 * read current interrupt status
2966		 */
2967		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
2968
2969		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2970
2971		/*
2972		 * MCP5X_INT_IGNORE interrupts will show up in the status,
2973		 * but are masked out from causing an interrupt to be generated
2974		 * to the processor.  Ignore them here by masking them out.
2975		 */
2976		int_status &= ~(MCP5X_INT_IGNORE);
2977
2978		/*
2979		 * exit the loop when no more interrupts to process
2980		 */
2981		if (int_status == 0) {
2982
2983			break;
2984		}
2985
2986		if (int_status & MCP5X_INT_COMPLETE) {
2987			NVLOG((NVDBG_INTR, nvc, nvp,
2988			    "mcp5x_packet_complete_intr"));
2989			/*
2990			 * since int_status was set, return DDI_INTR_CLAIMED
2991			 * from the DDI's perspective even though the packet
2992			 * completion may not have succeeded.  If it fails,
2993			 * need to manually clear the interrupt, otherwise
2994			 * clearing is implicit.
2995			 */
2996			ret = DDI_INTR_CLAIMED;
2997			if (mcp5x_packet_complete_intr(nvc, nvp) ==
2998			    NV_FAILURE) {
2999				clear = MCP5X_INT_COMPLETE;
3000			} else {
3001				intr_cycles = 0;
3002			}
3003		}
3004
3005		if (int_status & MCP5X_INT_DMA_SETUP) {
3006			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3007
3008			/*
3009			 * Needs to be cleared before starting the BM, so do it
3010			 * now.  make sure this is still working.
3011			 */
3012			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3013			    MCP5X_INT_DMA_SETUP);
3014#ifdef NCQ
3015			ret = mcp5x_dma_setup_intr(nvc, nvp);
3016#endif
3017		}
3018
3019		if (int_status & MCP5X_INT_REM) {
3020			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x device removed"));
3021			clear = MCP5X_INT_REM;
3022			ret = DDI_INTR_CLAIMED;
3023
3024			mutex_enter(&nvp->nvp_mutex);
3025			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3026			mutex_exit(&nvp->nvp_mutex);
3027
3028		} else if (int_status & MCP5X_INT_ADD) {
3029			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3030			clear = MCP5X_INT_ADD;
3031			ret = DDI_INTR_CLAIMED;
3032
3033			mutex_enter(&nvp->nvp_mutex);
3034			nv_report_add_remove(nvp, 0);
3035			mutex_exit(&nvp->nvp_mutex);
3036		}
3037
3038		if (clear) {
3039			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3040			clear = 0;
3041		}
3042
3043		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3044			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3045			    "processing.  Disabling port int_status=%X"
3046			    " clear=%X", int_status, clear);
3047			mutex_enter(&nvp->nvp_mutex);
3048			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3049			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3050			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3051			nvp->nvp_state |= NV_PORT_FAILED;
3052			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
3053			mutex_exit(&nvp->nvp_mutex);
3054		}
3055	}
3056
3057	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port: finished ret=%d", ret));
3058
3059	return (ret);
3060}
3061
3062
3063/* ARGSUSED */
3064static uint_t
3065mcp5x_intr(caddr_t arg1, caddr_t arg2)
3066{
3067	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3068	int ret;
3069
3070	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3071		return (DDI_INTR_UNCLAIMED);
3072
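	/*
	 * Claim the interrupt if either port claims it.  DDI_INTR_UNCLAIMED
	 * is zero, so OR'ing the two per-port results works here.
	 */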
3073	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3074	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3075
3076	return (ret);
3077}
3078
3079
3080#ifdef NCQ
3081/*
3082 * with software driven NCQ on mcp5x, an interrupt occurs right
3083 * before the drive is ready to do a DMA transfer.  At this point,
3084 * the PRD table needs to be programmed and the DMA engine enabled
3085 * and ready to go.
3086 *
3087 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3088 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3089 * -- clear bit 0 of master command reg
3090 * -- program PRD
3091 * -- clear the interrupt status bit for the DMA Setup FIS
3092 * -- set bit 0 of the bus master command register
3093 */
3094static int
3095mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3096{
3097	int slot;
3098	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3099	uint8_t bmicx;
3100	int port = nvp->nvp_port_num;
3101	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3102	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3103
3104	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ");
3106
3107	mutex_enter(&nvp->nvp_mutex);
3108
3109	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3110
3111	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3112
3113	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3114	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3115
3116	/*
3117	 * halt the DMA engine.  This step is necessary according to
3118	 * the mcp5x spec, probably since there may have been a "first" packet
3119	 * that already programmed the DMA engine, but may not turn out to
3120	 * be the first one processed.
3121	 */
3122	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3123
3124#if 0
3125	if (bmicx & BMICX_SSBM) {
3126		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3127		    "another packet.  Cancelling and reprogramming"));
3128		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3129	}
3130#endif
3131	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3132
3133	nv_start_dma_engine(nvp, slot);
3134
3135	mutex_exit(&nvp->nvp_mutex);
3136
3137	return (DDI_INTR_CLAIMED);
3138}
3139#endif /* NCQ */
3140
3141
3142/*
3143 * packet completion interrupt.  If the packet is complete, invoke
3144 * the packet completion callback.
3145 */
3146static int
3147mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3148{
3149	uint8_t status, bmstatus;
3150	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3151	int sactive;
3152	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3153	sata_pkt_t *spkt;
3154	nv_slot_t *nv_slotp;
3155
3156	mutex_enter(&nvp->nvp_mutex);
3157
3158	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3159
3160	if (!(bmstatus & BMISX_IDEINTS)) {
3161		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3162		mutex_exit(&nvp->nvp_mutex);
3163
3164		return (NV_FAILURE);
3165	}
3166
3167	/*
3168	 * If the just completed item is a non-ncq command, the busy
3169	 * bit should not be set
3170	 */
3171	if (nvp->nvp_non_ncq_run) {
3172		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3173		if (status & SATA_STATUS_BSY) {
3174			nv_cmn_err(CE_WARN, nvc, nvp,
3175			    "unexpected SATA_STATUS_BSY set");
3176			mutex_exit(&nvp->nvp_mutex);
3177			/*
3178			 * calling function will clear interrupt.  then
3179			 * the real interrupt will either arrive or the
3180			 * packet timeout handling will take over and
3181			 * reset.
3182			 */
3183			return (NV_FAILURE);
3184		}
3185
3186	} else {
3187		/*
		 * NCQ: check for BSY here and wait if still busy before
		 * continuing.  Rather than waste CPU time waiting for it to
		 * clear when starting a packet, the starting thread can exit
		 * immediately, but it may have to spin here for a while.
		 * Needs more work and experimentation.
3193		 */
3194		ASSERT(nvp->nvp_ncq_run);
3195	}
3196
3197
3198	if (nvp->nvp_ncq_run) {
3199		ncq_command = B_TRUE;
3200		ASSERT(nvp->nvp_non_ncq_run == 0);
3201	} else {
3202		ASSERT(nvp->nvp_non_ncq_run != 0);
3203	}
3204
3205	/*
3206	 * active_pkt_bit will represent the bitmap of the single completed
3207	 * packet.  Because of the nature of sw assisted NCQ, only one
3208	 * command will complete per interrupt.
3209	 */
3210
3211	if (ncq_command == B_FALSE) {
3212		active_pkt = 0;
3213	} else {
3214		/*
3215		 * NCQ: determine which command just completed, by examining
3216		 * which bit cleared in the register since last written.
3217		 */
3218		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3219
3220		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3221
3222		ASSERT(active_pkt_bit);
3223
3224
3225		/*
3226		 * this failure path needs more work to handle the
3227		 * error condition and recovery.
3228		 */
3229		if (active_pkt_bit == 0) {
3230			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3231
3232			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3233			    "nvp->nvp_sactive %X", sactive,
3234			    nvp->nvp_sactive_cache);
3235
3236			(void) nv_get8(cmdhdl, nvp->nvp_status);
3237
3238			mutex_exit(&nvp->nvp_mutex);
3239
3240			return (NV_FAILURE);
3241		}
3242
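		/*
		 * convert the single set bit into its slot (tag) index
		 */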
3243		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3244		    active_pkt++, active_pkt_bit >>= 1) {
3245		}
3246
3247		/*
3248		 * make sure only one bit is ever turned on
3249		 */
3250		ASSERT(active_pkt_bit == 1);
3251
3252		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3253	}
3254
3255	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3256
3257	spkt = nv_slotp->nvslot_spkt;
3258
3259	ASSERT(spkt != NULL);
3260
3261	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3262
3263	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3264
3265	/*
	 * If there is no link, the completion of the packet cannot be known
	 * for certain, so abort it.
3268	 */
3269	if (nv_check_link((&spkt->satapkt_device)->
3270	    satadev_scr.sstatus) == B_FALSE) {
3271		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
3272
3273	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3274
3275		nv_complete_io(nvp, spkt, active_pkt);
3276	}
3277
3278	mutex_exit(&nvp->nvp_mutex);
3279
3280	return (NV_SUCCESS);
3281}
3282
3283
3284static void
3285nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3286{
3287
3288	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3289
3290	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3291		nvp->nvp_ncq_run--;
3292	} else {
3293		nvp->nvp_non_ncq_run--;
3294	}
3295
3296	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so that a command issued from the completion
	 * callback can reuse the slot.
3299	 */
3300	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3301
3302	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
		 * If this is not a polled mode cmd, which has an
		 * active thread monitoring for completion, then the
		 * sleeping thread needs to be signalled that the cmd
		 * is complete.
3306		 * to signal the sleeping thread that the cmd is complete.
3307		 */
3308		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3309			cv_signal(&nvp->nvp_poll_cv);
3310		}
3311
3312		return;
3313	}
3314
3315	if (spkt->satapkt_comp != NULL) {
3316		mutex_exit(&nvp->nvp_mutex);
3317		(*spkt->satapkt_comp)(spkt);
3318		mutex_enter(&nvp->nvp_mutex);
3319	}
3320}
3321
3322
3323/*
 * check whether the packet is an ncq command or not.  for an ncq command,
 * start it if there is still room on the queue.  for a non-ncq command,
 * only start it if no other command is running.
3327 */
3328static int
3329nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3330{
3331	uint8_t cmd, ncq;
3332
3333	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3334
3335	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3336
3337	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3338	    (cmd == SATAC_READ_FPDMA_QUEUED));
3339
3340	if (ncq == B_FALSE) {
3341
3342		if ((nvp->nvp_non_ncq_run == 1) ||
3343		    (nvp->nvp_ncq_run > 0)) {
3344			/*
3345			 * next command is non-ncq which can't run
3346			 * concurrently.  exit and return queue full.
3347			 */
3348			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3349
3350			return (SATA_TRAN_QUEUE_FULL);
3351		}
3352
3353		return (nv_start_common(nvp, spkt));
3354	}
3355
3356	/*
3357	 * ncq == B_TRUE
3358	 */
3359	if (nvp->nvp_non_ncq_run == 1) {
3360		/*
3361		 * cannot start any NCQ commands when there
3362		 * is a non-NCQ command running.
3363		 */
3364		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3365
3366		return (SATA_TRAN_QUEUE_FULL);
3367	}
3368
3369#ifdef NCQ
3370	/*
3371	 * this is not compiled for now as satapkt_device.satadev_qdepth
3372	 * is being pulled out until NCQ support is later addressed
3373	 *
3374	 * nvp_queue_depth is initialized by the first NCQ command
3375	 * received.
3376	 */
3377	if (nvp->nvp_queue_depth == 1) {
3378		nvp->nvp_queue_depth =
3379		    spkt->satapkt_device.satadev_qdepth;
3380
3381		ASSERT(nvp->nvp_queue_depth > 1);
3382
3383		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3384		    "nv_process_queue: nvp_queue_depth set to %d",
3385		    nvp->nvp_queue_depth));
3386	}
3387#endif
3388
3389	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3390		/*
3391		 * max number of NCQ commands already active
3392		 */
3393		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3394
3395		return (SATA_TRAN_QUEUE_FULL);
3396	}
3397
3398	return (nv_start_common(nvp, spkt));
3399}
3400
3401
3402/*
3403 * configure INTx and legacy interrupts
3404 */
3405static int
3406nv_add_legacy_intrs(nv_ctl_t *nvc)
3407{
3408	dev_info_t	*devinfo = nvc->nvc_dip;
3409	int		actual, count = 0;
3410	int		x, y, rc, inum = 0;
3411
3412	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3413
3414	/*
3415	 * get number of interrupts
3416	 */
3417	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3418	if ((rc != DDI_SUCCESS) || (count == 0)) {
3419		NVLOG((NVDBG_INTR, nvc, NULL,
3420		    "ddi_intr_get_nintrs() failed, "
3421		    "rc %d count %d", rc, count));
3422
3423		return (DDI_FAILURE);
3424	}
3425
3426	/*
3427	 * allocate an array of interrupt handles
3428	 */
3429	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3430	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3431
3432	/*
3433	 * call ddi_intr_alloc()
3434	 */
3435	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3436	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3437
3438	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3439		nv_cmn_err(CE_WARN, nvc, NULL,
3440		    "ddi_intr_alloc() failed, rc %d", rc);
3441		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3442
3443		return (DDI_FAILURE);
3444	}
3445
3446	if (actual < count) {
3447		nv_cmn_err(CE_WARN, nvc, NULL,
3448		    "ddi_intr_alloc: requested: %d, received: %d",
3449		    count, actual);
3450
3451		goto failure;
3452	}
3453
3454	nvc->nvc_intr_cnt = actual;
3455
3456	/*
3457	 * get intr priority
3458	 */
3459	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3460	    DDI_SUCCESS) {
3461		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3462
3463		goto failure;
3464	}
3465
3466	/*
3467	 * Test for high level mutex
3468	 */
3469	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3470		nv_cmn_err(CE_WARN, nvc, NULL,
3471		    "nv_add_legacy_intrs: high level intr not supported");
3472
3473		goto failure;
3474	}
3475
3476	for (x = 0; x < actual; x++) {
3477		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3478		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3479			nv_cmn_err(CE_WARN, nvc, NULL,
3480			    "ddi_intr_add_handler() failed");
3481
3482			goto failure;
3483		}
3484	}
3485
3486	/*
3487	 * call ddi_intr_enable() for legacy interrupts
3488	 */
3489	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3490		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3491	}
3492
3493	return (DDI_SUCCESS);
3494
3495	failure:
3496	/*
3497	 * free allocated intr and nvc_htable
3498	 */
3499	for (y = 0; y < actual; y++) {
3500		(void) ddi_intr_free(nvc->nvc_htable[y]);
3501	}
3502
3503	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3504
3505	return (DDI_FAILURE);
3506}
3507
3508#ifdef	NV_MSI_SUPPORTED
3509/*
3510 * configure MSI interrupts
3511 */
3512static int
3513nv_add_msi_intrs(nv_ctl_t *nvc)
3514{
3515	dev_info_t	*devinfo = nvc->nvc_dip;
3516	int		count, avail, actual;
3517	int		x, y, rc, inum = 0;
3518
3519	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3520
3521	/*
3522	 * get number of interrupts
3523	 */
3524	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3525	if ((rc != DDI_SUCCESS) || (count == 0)) {
3526		nv_cmn_err(CE_WARN, nvc, NULL,
3527		    "ddi_intr_get_nintrs() failed, "
3528		    "rc %d count %d", rc, count);
3529
3530		return (DDI_FAILURE);
3531	}
3532
3533	/*
3534	 * get number of available interrupts
3535	 */
3536	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3537	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3538		nv_cmn_err(CE_WARN, nvc, NULL,
3539		    "ddi_intr_get_navail() failed, "
3540		    "rc %d avail %d", rc, avail);
3541
3542		return (DDI_FAILURE);
3543	}
3544
3545	if (avail < count) {
3546		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3548		    avail, count);
3549	}
3550
3551	/*
3552	 * allocate an array of interrupt handles
3553	 */
3554	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3555	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3556
3557	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3558	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3559
3560	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3561		nv_cmn_err(CE_WARN, nvc, NULL,
3562		    "ddi_intr_alloc() failed, rc %d", rc);
3563		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3564
3565		return (DDI_FAILURE);
3566	}
3567
3568	/*
3569	 * Use interrupt count returned or abort?
3570	 */
3571	if (actual < count) {
3572		NVLOG((NVDBG_INIT, nvc, NULL,
3573		    "Requested: %d, Received: %d", count, actual));
3574	}
3575
3576	nvc->nvc_intr_cnt = actual;
3577
3578	/*
3579	 * get priority for first msi, assume remaining are all the same
3580	 */
3581	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3582	    DDI_SUCCESS) {
3583		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3584
3585		goto failure;
3586	}
3587
3588	/*
3589	 * test for high level mutex
3590	 */
3591	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3592		nv_cmn_err(CE_WARN, nvc, NULL,
3593		    "nv_add_msi_intrs: high level intr not supported");
3594
3595		goto failure;
3596	}
3597
3598	/*
3599	 * Call ddi_intr_add_handler()
3600	 */
3601	for (x = 0; x < actual; x++) {
3602		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3603		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3604			nv_cmn_err(CE_WARN, nvc, NULL,
3605			    "ddi_intr_add_handler() failed");
3606
3607			goto failure;
3608		}
3609	}
3610
3611	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3612
3613	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3614		(void) ddi_intr_block_enable(nvc->nvc_htable,
3615		    nvc->nvc_intr_cnt);
3616	} else {
3617		/*
3618		 * Call ddi_intr_enable() for MSI non block enable
3619		 */
3620		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3621			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3622		}
3623	}
3624
3625	return (DDI_SUCCESS);
3626
3627	failure:
3628	/*
3629	 * free allocated intr and nvc_htable
3630	 */
3631	for (y = 0; y < actual; y++) {
3632		(void) ddi_intr_free(nvc->nvc_htable[y]);
3633	}
3634
3635	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3636
3637	return (DDI_FAILURE);
3638}
3639#endif
3640
3641
3642static void
3643nv_rem_intrs(nv_ctl_t *nvc)
3644{
3645	int x, i;
3646	nv_port_t *nvp;
3647
3648	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3649
3650	/*
3651	 * prevent controller from generating interrupts by
3652	 * masking them out.  This is an extra precaution.
3653	 */
3654	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3655		nvp = (&nvc->nvc_port[i]);
3656		mutex_enter(&nvp->nvp_mutex);
3657		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3658		mutex_exit(&nvp->nvp_mutex);
3659	}
3660
3661	/*
3662	 * disable all interrupts
3663	 */
3664	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3665	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3666		(void) ddi_intr_block_disable(nvc->nvc_htable,
3667		    nvc->nvc_intr_cnt);
3668	} else {
3669		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3670			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3671		}
3672	}
3673
3674	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3675		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3676		(void) ddi_intr_free(nvc->nvc_htable[x]);
3677	}
3678
3679	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3680}
3681
3682
3683/*
3684 * variable argument wrapper for cmn_err.  prefixes the instance and port
3685 * number if possible
3686 */
3687static void
3688nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3689{
3690	char port[NV_STRING_10];
3691	char inst[NV_STRING_10];
3692
3693	mutex_enter(&nv_log_mutex);
3694
3695	if (nvc) {
3696		(void) snprintf(inst, NV_STRING_10, "inst %d",
3697		    ddi_get_instance(nvc->nvc_dip));
3698	} else {
3699		inst[0] = '\0';
3700	}
3701
3702	if (nvp) {
3703		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3704	} else {
3705		port[0] = '\0';
3706	}
3707
3708	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3709	    (inst[0]|port[0] ? ": " :""));
3710
3711	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3712	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3713
3714	/*
3715	 * normally set to log to console but in some debug situations it
3716	 * may be useful to log only to a file.
3717	 */
3718	if (nv_log_to_console) {
3719		if (nv_prom_print) {
3720			prom_printf("%s\n", nv_log_buf);
3721		} else {
3722			cmn_err(ce, "%s", nv_log_buf);
3723		}
3724
3725
3726	} else {
3727		cmn_err(ce, "!%s", nv_log_buf);
3728	}
3729
3730	mutex_exit(&nv_log_mutex);
3731}
3732
3733
3734/*
3735 * wrapper for cmn_err
3736 */
3737static void
3738nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3739{
3740	va_list ap;
3741
3742	va_start(ap, fmt);
3743	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3744	va_end(ap);
3745}
3746
3747
3748#if defined(DEBUG)
3749/*
3750 * prefixes the instance and port number if possible to the debug message
3751 */
3752static void
3753nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3754{
3755	va_list ap;
3756
3757	if ((nv_debug_flags & flag) == 0) {
3758		return;
3759	}
3760
3761	va_start(ap, fmt);
3762	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3763	va_end(ap);
3764
3765	/*
3766	 * useful for some debugging situations
3767	 */
3768	if (nv_log_delay) {
3769		drv_usecwait(nv_log_delay);
3770	}
3771
3772}
3773#endif /* DEBUG */
3774
3775
3776/*
3777 * program registers which are common to all commands
3778 */
3779static void
3780nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3781{
3782	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3783	sata_pkt_t *spkt;
3784	sata_cmd_t *satacmd;
3785	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3786	uint8_t cmd, ncq = B_FALSE;
3787
3788	spkt = nv_slotp->nvslot_spkt;
3789	satacmd = &spkt->satapkt_cmd;
3790	cmd = satacmd->satacmd_cmd_reg;
3791
3792	ASSERT(nvp->nvp_slot);
3793
3794	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3795	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3796		ncq = B_TRUE;
3797	}
3798
3799	/*
3800	 * select the drive
3801	 */
3802	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3803
3804	/*
3805	 * make certain the drive selected
3806	 */
3807	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3808	    NV_SEC2USEC(5), 0) == B_FALSE) {
3809
3810		return;
3811	}
3812
3813	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3814
3815	case ATA_ADDR_LBA:
3816		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3817
3818		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3819		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3820		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3821		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3822
3823		break;
3824
3825	case ATA_ADDR_LBA28:
3826		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3827		    "ATA_ADDR_LBA28 mode"));
3828		/*
3829		 * NCQ only uses 48-bit addressing
3830		 */
3831		ASSERT(ncq != B_TRUE);
3832
3833		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3834		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3835		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3836		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3837
3838		break;
3839
3840	case ATA_ADDR_LBA48:
3841		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3842		    "ATA_ADDR_LBA48 mode"));
3843
3844		/*
3845		 * for NCQ, tag goes into count register and real sector count
3846		 * into features register.  The sata module does the translation
3847		 * in the satacmd.
3848		 */
3849		if (ncq == B_TRUE) {
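			/*
			 * the NCQ tag occupies bits 7:3 of the sector count
			 * register, hence the shift by 3 of the slot number
			 */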
3850			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3851			nv_put8(cmdhdl, nvp->nvp_feature,
3852			    satacmd->satacmd_features_reg_ext);
3853			nv_put8(cmdhdl, nvp->nvp_feature,
3854			    satacmd->satacmd_features_reg);
3855		} else {
3856			nv_put8(cmdhdl, nvp->nvp_count,
3857			    satacmd->satacmd_sec_count_msb);
3858			nv_put8(cmdhdl, nvp->nvp_count,
3859			    satacmd->satacmd_sec_count_lsb);
3860		}
3861
3862		/*
3863		 * send the high-order half first
3864		 */
3865		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3866		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3867		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3868		/*
3869		 * Send the low-order half
3870		 */
3871		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3872		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3873		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3874
3875		break;
3876
3877	case 0:
3878		/*
3879		 * non-media access commands such as identify and features
3880		 * take this path.
3881		 */
3882		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3883		nv_put8(cmdhdl, nvp->nvp_feature,
3884		    satacmd->satacmd_features_reg);
3885		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3886		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3887		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3888
3889		break;
3890
3891	default:
3892		break;
3893	}
3894
3895	ASSERT(nvp->nvp_slot);
3896}
3897
3898
3899/*
3900 * start a command that involves no media access
3901 */
3902static int
3903nv_start_nodata(nv_port_t *nvp, int slot)
3904{
3905	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3906	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3907	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3908	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3909
3910	nv_program_taskfile_regs(nvp, slot);
3911
3912	/*
3913	 * This next one sets the controller in motion
3914	 */
3915	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3916
3917	return (SATA_TRAN_ACCEPTED);
3918}
3919
3920
3921int
3922nv_bm_status_clear(nv_port_t *nvp)
3923{
3924	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3925	uchar_t	status, ret;
3926
3927	/*
3928	 * Get the current BM status
3929	 */
3930	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3931
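	/*
	 * The error and interrupt status bits in this register are
	 * write-one-to-clear, so OR them into the value written back
	 * while preserving the read/write control bits.
	 */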
3932	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3933
3934	/*
3935	 * Clear the latches (and preserve the other bits)
3936	 */
3937	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3938
3939	return (ret);
3940}
3941
3942
3943/*
3944 * program the bus master DMA engine with the PRD address for
3945 * the active slot command, and start the DMA engine.
3946 */
3947static void
3948nv_start_dma_engine(nv_port_t *nvp, int slot)
3949{
3950	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3951	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3952	uchar_t direction;
3953
3954	ASSERT(nv_slotp->nvslot_spkt != NULL);
3955
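	/*
	 * For a device read the bus master writes into host memory, and for
	 * a device write it reads from host memory, so program the direction
	 * control bit accordingly.
	 */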
3956	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3957	    == SATA_DIR_READ) {
3958		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3959	} else {
3960		direction = BMICX_RWCON_READ_FROM_MEMORY;
3961	}
3962
3963	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3964	    "nv_start_dma_engine entered"));
3965
3966	/*
3967	 * reset the controller's interrupt and error status bits
3968	 */
3969	(void) nv_bm_status_clear(nvp);
3970
3971	/*
3972	 * program the PRD table physical start address
3973	 */
3974	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3975
3976	/*
3977	 * set the direction control and start the DMA controller
3978	 */
3979	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3980}
3981
3982/*
3983 * start dma command, either in or out
3984 */
3985static int
3986nv_start_dma(nv_port_t *nvp, int slot)
3987{
3988	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3989	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3990	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3991	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3992	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3993#ifdef NCQ
3994	uint8_t ncq = B_FALSE;
3995#endif
3996	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3997	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3998	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3999	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4000
4001	ASSERT(sg_count != 0);
4002
4003	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4004		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4005		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4006		    sata_cmdp->satacmd_num_dma_cookies);
4007
4008		return (NV_FAILURE);
4009	}
4010
4011	nv_program_taskfile_regs(nvp, slot);
4012
4013	/*
4014	 * start the drive in motion
4015	 */
4016	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4017
4018	/*
4019	 * the drive starts processing the transaction when the cmd register
4020	 * is written.  This is done here before programming the DMA engine to
4021	 * parallelize and save some time.  In the event that the drive is ready
4022	 * before DMA, it will wait.
4023	 */
4024#ifdef NCQ
4025	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4026	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4027		ncq = B_TRUE;
4028	}
4029#endif
4030
4031	/*
4032	 * copy the PRD list to PRD table in DMA accessible memory
4033	 * so that the controller can access it.
4034	 */
4035	for (idx = 0; idx < sg_count; idx++, srcp++) {
4036		uint32_t size;
4037
4038		ASSERT(srcp->dmac_size <= UINT16_MAX);
4039
4040		nv_put32(sghdl, dstp++, srcp->dmac_address);
4041
4042		size = srcp->dmac_size;
4043
4044		/*
		 * If this is a 40-bit address, copy bits 32-39 of the
		 * physical address into bits 16-23 of the PRD count field.
4047		 */
4048		if (srcp->dmac_laddress > UINT32_MAX) {
4049			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4050		}
4051
4052		/*
4053		 * set the end of table flag for the last entry
4054		 */
4055		if (idx == (sg_count - 1)) {
4056			size |= PRDE_EOT;
4057		}
4058
4059		nv_put32(sghdl, dstp++, size);
4060	}
4061
4062	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4063	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4064
4065	nv_start_dma_engine(nvp, slot);
4066
4067#ifdef NCQ
4068	/*
4069	 * optimization:  for SWNCQ, start DMA engine if this is the only
4070	 * command running.  Preliminary NCQ efforts indicated this needs
4071	 * more debugging.
4072	 *
4073	 * if (nvp->nvp_ncq_run <= 1)
4074	 */
4075
4076	if (ncq == B_FALSE) {
4077		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4078		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4079		    " cmd = %X", non_ncq_commands++, cmd));
4080		nv_start_dma_engine(nvp, slot);
4081	} else {
4082		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4083		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4084	}
4085#endif /* NCQ */
4086
4087	return (SATA_TRAN_ACCEPTED);
4088}
4089
4090
4091/*
4092 * start a PIO data-in ATA command
4093 */
4094static int
4095nv_start_pio_in(nv_port_t *nvp, int slot)
4096{
4097
4098	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4099	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4100	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4101
4102	nv_program_taskfile_regs(nvp, slot);
4103
4104	/*
4105	 * This next one sets the drive in motion
4106	 */
4107	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4108
4109	return (SATA_TRAN_ACCEPTED);
4110}
4111
4112
4113/*
4114 * start a PIO data-out ATA command
4115 */
4116static int
4117nv_start_pio_out(nv_port_t *nvp, int slot)
4118{
4119	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4120	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4121	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4122
4123	nv_program_taskfile_regs(nvp, slot);
4124
4125	/*
4126	 * this next one sets the drive in motion
4127	 */
4128	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4129
4130	/*
4131	 * wait for the busy bit to settle
4132	 */
4133	NV_DELAY_NSEC(400);
4134
4135	/*
4136	 * wait for the drive to assert DRQ to send the first chunk
4137	 * of data. Have to busy wait because there's no interrupt for
4138	 * the first chunk. This is bad... uses a lot of cycles if the
4139	 * drive responds too slowly or if the wait loop granularity
4140	 * is too large. It's even worse if the drive is defective and
4141	 * the loop times out.
4142	 */
4143	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4144	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4145	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4146	    4000000, 0) == B_FALSE) {
4147		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4148
4149		goto error;
4150	}
4151
4152	/*
4153	 * send the first block.
4154	 */
4155	nv_intr_pio_out(nvp, nv_slotp);
4156
4157	/*
4158	 * If nvslot_flags is not set to COMPLETE yet, then processing
4159	 * is OK so far, so return.  Otherwise, fall into error handling
4160	 * below.
4161	 */
4162	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4163
4164		return (SATA_TRAN_ACCEPTED);
4165	}
4166
4167	error:
4168	/*
4169	 * there was an error so reset the device and complete the packet.
4170	 */
4171	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4172	nv_complete_io(nvp, spkt, 0);
4173	nv_reset(nvp);
4174
4175	return (SATA_TRAN_PORT_ERROR);
4176}
4177
4178
4179/*
 * start an ATAPI PACKET command (PIO data in or out)
4181 */
4182static int
4183nv_start_pkt_pio(nv_port_t *nvp, int slot)
4184{
4185	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4186	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4187	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4188	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4189
4190	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4191	    "nv_start_pkt_pio: start"));
4192
4193	/*
4194	 * Write the PACKET command to the command register.  Normally
4195	 * this would be done through nv_program_taskfile_regs().  It
4196	 * is done here because some values need to be overridden.
4197	 */
4198
4199	/* select the drive */
4200	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4201
4202	/* make certain the drive selected */
4203	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4204	    NV_SEC2USEC(5), 0) == B_FALSE) {
4205		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4206		    "nv_start_pkt_pio: drive select failed"));
4207		return (SATA_TRAN_PORT_ERROR);
4208	}
4209
4210	/*
	 * The command is always sent via PIO, regardless of what the SATA
4212	 * framework sets in the command.  Overwrite the DMA bit to do this.
4213	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4214	 */
4215	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4216
4217	/* set appropriately by the sata framework */
4218	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4219	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4220	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4221	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4222
4223	/* initiate the command by writing the command register last */
4224	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4225
4226	/* Give the host controller time to do its thing */
4227	NV_DELAY_NSEC(400);
4228
4229	/*
4230	 * Wait for the device to indicate that it is ready for the command
4231	 * ATAPI protocol state - HP0: Check_Status_A
4232	 */
4233
4234	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4235	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4236	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4237	    4000000, 0) == B_FALSE) {
4238		/*
4239		 * Either an error or device fault occurred or the wait
4240		 * timed out.  According to the ATAPI protocol, command
4241		 * completion is also possible.  Other implementations of
4242		 * this protocol don't handle this last case, so neither
4243		 * does this code.
4244		 */
4245
4246		if (nv_get8(cmdhdl, nvp->nvp_status) &
4247		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4248			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4249
4250			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4251			    "nv_start_pkt_pio: device error (HP0)"));
4252		} else {
4253			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4254
4255			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4256			    "nv_start_pkt_pio: timeout (HP0)"));
4257		}
4258
4259		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4260		nv_complete_io(nvp, spkt, 0);
4261		nv_reset(nvp);
4262
4263		return (SATA_TRAN_PORT_ERROR);
4264	}
4265
4266	/*
4267	 * Put the ATAPI command in the data register
4268	 * ATAPI protocol state - HP1: Send_Packet
4269	 */
4270
4271	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4272	    (ushort_t *)nvp->nvp_data,
4273	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4274
4275	/*
4276	 * See you in nv_intr_pkt_pio.
4277	 * ATAPI protocol state - HP3: INTRQ_wait
4278	 */
4279
4280	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4281	    "nv_start_pkt_pio: exiting into HP3"));
4282
4283	return (SATA_TRAN_ACCEPTED);
4284}
4285
4286
4287/*
4288 * Interrupt processing for a non-data ATA command.
4289 */
4290static void
4291nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4292{
4293	uchar_t status;
4294	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4295	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4296	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4297	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4298
4299	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4300
4301	status = nv_get8(cmdhdl, nvp->nvp_status);
4302
4303	/*
4304	 * check for errors
4305	 */
4306	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4307		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4308		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4309		    nvp->nvp_altstatus);
4310		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4311	} else {
4312		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4313	}
4314
4315	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4316}
4317
4318
4319/*
4320 * ATA command, PIO data in
4321 */
4322static void
4323nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4324{
4325	uchar_t	status;
4326	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4327	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4328	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4329	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4330	int count;
4331
4332	status = nv_get8(cmdhdl, nvp->nvp_status);
4333
4334	if (status & SATA_STATUS_BSY) {
4335		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4336		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4337		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4338		    nvp->nvp_altstatus);
4339		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4340		nv_reset(nvp);
4341
4342		return;
4343	}
4344
4345	/*
4346	 * check for errors
4347	 */
4348	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4349	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4350		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4351		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4352		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4353
4354		return;
4355	}
4356
4357	/*
4358	 * read the next chunk of data (if any)
4359	 */
4360	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4361
4362	/*
4363	 * read count bytes
4364	 */
4365	ASSERT(count != 0);
4366
4367	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4368	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4369
4370	nv_slotp->nvslot_v_addr += count;
4371	nv_slotp->nvslot_byte_count -= count;
4372
4373
4374	if (nv_slotp->nvslot_byte_count != 0) {
4375		/*
4376		 * more to transfer.  Wait for next interrupt.
4377		 */
4378		return;
4379	}
4380
4381	/*
4382	 * transfer is complete. wait for the busy bit to settle.
4383	 */
4384	NV_DELAY_NSEC(400);
4385
4386	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4387	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4388}
4389
4390
4391/*
4392 * ATA command PIO data out
4393 */
4394static void
4395nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4396{
4397	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4398	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4399	uchar_t status;
4400	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4401	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4402	int count;
4403
4404	/*
4405	 * clear the IRQ
4406	 */
4407	status = nv_get8(cmdhdl, nvp->nvp_status);
4408
4409	if (status & SATA_STATUS_BSY) {
4410		/*
4411		 * this should not happen
4412		 */
4413		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4414		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4415		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4416		    nvp->nvp_altstatus);
4417		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4418
4419		return;
4420	}
4421
4422	/*
4423	 * check for errors
4424	 */
4425	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4426		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4427		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4428		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4429
4430		return;
4431	}
4432
4433	/*
	 * This is the condition which signals the drive is
	 * no longer ready to transfer.  The transfer likely
	 * completed successfully, but check that byte_count is
	 * zero to confirm.
4438	 */
4439	if ((status & SATA_STATUS_DRQ) == 0) {
4440
4441		if (nv_slotp->nvslot_byte_count == 0) {
4442			/*
4443			 * complete; successful transfer
4444			 */
4445			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4446		} else {
4447			/*
4448			 * error condition, incomplete transfer
4449			 */
4450			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4451			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4452		}
4453		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4454
4455		return;
4456	}
4457
4458	/*
4459	 * write the next chunk of data
4460	 */
4461	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4462
4463	/*
	 * write count bytes
4465	 */
4466
4467	ASSERT(count != 0);
4468
4469	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4470	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4471
4472	nv_slotp->nvslot_v_addr += count;
4473	nv_slotp->nvslot_byte_count -= count;
4474}
4475
4476
4477/*
4478 * ATAPI PACKET command, PIO in/out interrupt
4479 *
4480 * Under normal circumstances, one of four different interrupt scenarios
4481 * will result in this function being called:
4482 *
4483 * 1. Packet command data transfer
4484 * 2. Packet command completion
4485 * 3. Request sense data transfer
4486 * 4. Request sense command completion
4487 */
4488static void
4489nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4490{
4491	uchar_t	status;
4492	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4493	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4494	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4495	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4496	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4497	uint16_t ctlr_count;
4498	int count;
4499
4500	/* ATAPI protocol state - HP2: Check_Status_B */
4501
4502	status = nv_get8(cmdhdl, nvp->nvp_status);
4503	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4504	    "nv_intr_pkt_pio: status 0x%x", status));
4505
4506	if (status & SATA_STATUS_BSY) {
4507		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4508			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4509			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4510		} else {
4511			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4512			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4513
4514			nv_reset(nvp);
4515		}
4516
4517		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4518		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4519
4520		return;
4521	}
4522
4523	if ((status & SATA_STATUS_DF) != 0) {
4524		/*
4525		 * On device fault, just clean up and bail.  Request sense
4526		 * will just default to its NO SENSE initialized value.
4527		 */
4528
4529		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4530			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4531		}
4532
4533		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4534		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4535
4536		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4537		    nvp->nvp_altstatus);
4538		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4539		    nvp->nvp_error);
4540
4541		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4542		    "nv_intr_pkt_pio: device fault"));
4543
4544		return;
4545	}
4546
4547	if ((status & SATA_STATUS_ERR) != 0) {
4548		/*
4549		 * On command error, figure out whether we are processing a
4550		 * request sense.  If so, clean up and bail.  Otherwise,
4551		 * do a REQUEST SENSE.
4552		 */
4553
4554		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4555			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4556			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4557			    NV_FAILURE) {
4558				nv_copy_registers(nvp, &spkt->satapkt_device,
4559				    spkt);
4560				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4561				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4562			}
4563
4564			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4565			    nvp->nvp_altstatus);
4566			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4567			    nvp->nvp_error);
4568		} else {
4569			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4570			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4571
4572			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4573		}
4574
4575		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4576		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4577
4578		return;
4579	}
4580
4581	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4582		/*
4583		 * REQUEST SENSE command processing
4584		 */
4585
4586		if ((status & (SATA_STATUS_DRQ)) != 0) {
4587			/* ATAPI state - HP4: Transfer_Data */
4588
4589			/* read the byte count from the controller */
4590			ctlr_count =
4591			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4592			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4593
4594			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4595			    "nv_intr_pkt_pio: ctlr byte count - %d",
4596			    ctlr_count));
4597
4598			if (ctlr_count == 0) {
4599				/* no data to transfer - some devices do this */
4600
4601				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4602				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4603
4604				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4605				    "nv_intr_pkt_pio: done (no data)"));
4606
4607				return;
4608			}
4609
4610			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4611
4612			/* transfer the data */
4613			ddi_rep_get16(cmdhdl,
4614			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4615			    (ushort_t *)nvp->nvp_data, (count >> 1),
4616			    DDI_DEV_NO_AUTOINCR);
4617
4618			/* consume residual bytes */
4619			ctlr_count -= count;
4620
4621			if (ctlr_count > 0) {
4622				for (; ctlr_count > 0; ctlr_count -= 2)
4623					(void) ddi_get16(cmdhdl,
4624					    (ushort_t *)nvp->nvp_data);
4625			}
4626
4627			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4628			    "nv_intr_pkt_pio: transition to HP2"));
4629		} else {
4630			/* still in ATAPI state - HP2 */
4631
4632			/*
4633			 * In order to avoid clobbering the rqsense data
4634			 * set by the SATA framework, the sense data read
4635			 * from the device is put in a separate buffer and
4636			 * copied into the packet after the request sense
4637			 * command successfully completes.
4638			 */
4639			bcopy(nv_slotp->nvslot_rqsense_buff,
4640			    spkt->satapkt_cmd.satacmd_rqsense,
4641			    SATA_ATAPI_RQSENSE_LEN);
4642
4643			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4644			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4645
4646			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4647			    "nv_intr_pkt_pio: request sense done"));
4648		}
4649
4650		return;
4651	}
4652
4653	/*
4654	 * Normal command processing
4655	 */
4656
4657	if ((status & (SATA_STATUS_DRQ)) != 0) {
4658		/* ATAPI protocol state - HP4: Transfer_Data */
4659
4660		/* read the byte count from the controller */
4661		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4662		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4663
4664		if (ctlr_count == 0) {
4665			/* no data to transfer - some devices do this */
4666
4667			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4668			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4669
4670			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4671			    "nv_intr_pkt_pio: done (no data)"));
4672
4673			return;
4674		}
4675
4676		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4677
4678		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4679		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4680
4681		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4682		    "nv_intr_pkt_pio: byte_count 0x%x",
4683		    nv_slotp->nvslot_byte_count));
4684
4685		/* transfer the data */
4686
4687		if (direction == SATA_DIR_READ) {
4688			ddi_rep_get16(cmdhdl,
4689			    (ushort_t *)nv_slotp->nvslot_v_addr,
4690			    (ushort_t *)nvp->nvp_data, (count >> 1),
4691			    DDI_DEV_NO_AUTOINCR);
4692
4693			ctlr_count -= count;
4694
4695			if (ctlr_count > 0) {
				/* consume remaining bytes */
4697
4698				for (; ctlr_count > 0;
4699				    ctlr_count -= 2)
4700					(void) ddi_get16(cmdhdl,
4701					    (ushort_t *)nvp->nvp_data);
4702
4703				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4704				    "nv_intr_pkt_pio: bytes remained"));
4705			}
4706		} else {
4707			ddi_rep_put16(cmdhdl,
4708			    (ushort_t *)nv_slotp->nvslot_v_addr,
4709			    (ushort_t *)nvp->nvp_data, (count >> 1),
4710			    DDI_DEV_NO_AUTOINCR);
4711		}
4712
4713		nv_slotp->nvslot_v_addr += count;
4714		nv_slotp->nvslot_byte_count -= count;
4715
4716		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4717		    "nv_intr_pkt_pio: transition to HP2"));
4718	} else {
4719		/* still in ATAPI state - HP2 */
4720
4721		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4722		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4723
4724		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4725		    "nv_intr_pkt_pio: done"));
4726	}
4727}
4728
4729
4730/*
4731 * ATA command, DMA data in/out
4732 */
4733static void
4734nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4735{
4736	uchar_t status;
4737	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4738	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4739	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4740	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4741	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4742	uchar_t	bmicx;
4743	uchar_t bm_status;
4744
4745	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4746
4747	/*
4748	 * stop DMA engine.
4749	 */
4750	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4751	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4752
4753	/*
4754	 * get the status and clear the IRQ, and check for DMA error
4755	 */
4756	status = nv_get8(cmdhdl, nvp->nvp_status);
4757
4758	/*
4759	 * check for drive errors
4760	 */
4761	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4762		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4763		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4764		(void) nv_bm_status_clear(nvp);
4765
4766		return;
4767	}
4768
4769	bm_status = nv_bm_status_clear(nvp);
4770
4771	/*
4772	 * check for bus master errors
4773	 */
4774	if (bm_status & BMISX_IDERR) {
4775		spkt->satapkt_reason = SATA_PKT_RESET;
4776		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4777		    nvp->nvp_altstatus);
4778		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4779		nv_reset(nvp);
4780
4781		return;
4782	}
4783
4784	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4785}
4786
4787
4788/*
4789 * Wait for a register of a controller to achieve a specific state.
4790 * To return normally, all the bits in the first sub-mask must be ON,
4791 * all the bits in the second sub-mask must be OFF.
4792 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE; otherwise return B_TRUE.
4794 *
4795 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4796 * occur for the first 250 us, then switch over to a sleeping wait.
4797 *
4798 */
4799int
4800nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4801    int type_wait)
4802{
4803	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4804	hrtime_t end, cur, start_sleep, start;
4805	int first_time = B_TRUE;
4806	ushort_t val;
4807
4808	for (;;) {
4809		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4810
4811		if ((val & onbits) == onbits && (val & offbits) == 0) {
4812
4813			return (B_TRUE);
4814		}
4815
4816		cur = gethrtime();
4817
4818		/*
4819		 * store the start time and calculate the end
4820		 * time.  also calculate "start_sleep" which is
4821		 * the point after which the driver will stop busy
4822		 * waiting and change to sleep waiting.
4823		 */
4824		if (first_time) {
4825			first_time = B_FALSE;
4826			/*
4827			 * start and end are in nanoseconds
4828			 */
4829			start = cur;
4830			end = start + timeout_usec * 1000;
4831			/*
4832			 * add 1 ms to start
4833			 */
4834			start_sleep =  start + 250000;
4835
4836			if (servicing_interrupt()) {
4837				type_wait = NV_NOSLEEP;
4838			}
4839		}
4840
4841		if (cur > end) {
4842
4843			break;
4844		}
4845
4846		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
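			/*
			 * past the initial busy-wait window; sleep for a
			 * clock tick instead of continuing to spin
			 */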
4847#if ! defined(__lock_lint)
4848			delay(1);
4849#endif
4850		} else {
4851			drv_usecwait(nv_usec_delay);
4852		}
4853	}
4854
4855	return (B_FALSE);
4856}
4857
4858
4859/*
4860 * This is a slightly more complicated version that checks
 * for error conditions and bails out rather than looping
4862 * until the timeout is exceeded.
4863 *
4864 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4865 * occur for the first 250 us, then switch over to a sleeping wait.
4866 */
4867int
4868nv_wait3(
4869	nv_port_t	*nvp,
4870	uchar_t		onbits1,
4871	uchar_t		offbits1,
4872	uchar_t		failure_onbits2,
4873	uchar_t		failure_offbits2,
4874	uchar_t		failure_onbits3,
4875	uchar_t		failure_offbits3,
4876	uint_t		timeout_usec,
4877	int		type_wait)
4878{
4879	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4880	hrtime_t end, cur, start_sleep, start;
4881	int first_time = B_TRUE;
4882	ushort_t val;
4883
4884	for (;;) {
4885		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4886
4887		/*
4888		 * check for expected condition
4889		 */
4890		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4891
4892			return (B_TRUE);
4893		}
4894
4895		/*
4896		 * check for error conditions
4897		 */
4898		if ((val & failure_onbits2) == failure_onbits2 &&
4899		    (val & failure_offbits2) == 0) {
4900
4901			return (B_FALSE);
4902		}
4903
4904		if ((val & failure_onbits3) == failure_onbits3 &&
4905		    (val & failure_offbits3) == 0) {
4906
4907			return (B_FALSE);
4908		}
4909
4910		/*
4911		 * store the start time and calculate the end
4912		 * time.  also calculate "start_sleep" which is
4913		 * the point after which the driver will stop busy
4914		 * waiting and change to sleep waiting.
4915		 */
4916		if (first_time) {
4917			first_time = B_FALSE;
4918			/*
4919			 * start and end are in nanoseconds
4920			 */
4921			cur = start = gethrtime();
4922			end = start + timeout_usec * 1000;
4923			/*
4924			 * add 1 ms to start
4925			 */
4926			start_sleep =  start + 250000;
4927
4928			if (servicing_interrupt()) {
4929				type_wait = NV_NOSLEEP;
4930			}
4931		} else {
4932			cur = gethrtime();
4933		}
4934
4935		if (cur > end) {
4936
4937			break;
4938		}
4939
4940		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4941#if ! defined(__lock_lint)
4942			delay(1);
4943#endif
4944		} else {
4945			drv_usecwait(nv_usec_delay);
4946		}
4947	}
4948
4949	return (B_FALSE);
4950}
4951
4952
4953/*
 * nv_check_link() checks whether a specified link is active, i.e. a
 * device is present and communicating.
4956 */
4957static boolean_t
4958nv_check_link(uint32_t sstatus)
4959{
4960	uint8_t det;
4961
4962	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4963
4964	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4965}
4966
4967
4968/*
4969 * nv_port_state_change() reports the state of the port to the
4970 * sata module by calling sata_hba_event_notify().  This
4971 * function is called any time the state of the port is changed
4972 */
4973static void
4974nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4975{
4976	sata_device_t sd;
4977
4978	bzero((void *)&sd, sizeof (sata_device_t));
4979	sd.satadev_rev = SATA_DEVICE_REV;
4980	nv_copy_registers(nvp, &sd, NULL);
4981
4982	/*
	 * When NCQ is implemented, the sactive and snotific fields will
	 * need to be updated here as well.
4985	 */
4986	sd.satadev_addr.cport = nvp->nvp_port_num;
4987	sd.satadev_addr.qual = addr_type;
4988	sd.satadev_state = state;
4989
4990	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4991}
4992
4993
4994/*
4995 * timeout processing:
4996 *
4997 * Check if any packets have crossed a timeout threshold.  If so, then
4998 * abort the packet.  This function is not NCQ aware.
4999 *
5000 * If reset was invoked in any other place than nv_sata_probe(), then
5001 * monitor for reset completion here.
5002 *
5003 */
5004static void
5005nv_timeout(void *arg)
5006{
5007	nv_port_t *nvp = arg;
5008	nv_slot_t *nv_slotp;
5009	int restart_timeout = B_FALSE;
5010
5011	mutex_enter(&nvp->nvp_mutex);
5012
5013	/*
5014	 * If the probe entry point is driving the reset and signature
5015	 * acquisition, just return.
5016	 */
5017	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
5018		goto finished;
5019	}
5020
5021	/*
5022	 * If the port is not in the init state, it likely
5023	 * means the link was lost while a timeout was active.
5024	 */
5025	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5026		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5027		    "nv_timeout: port uninitialized"));
5028
5029		goto finished;
5030	}
5031
5032	if (nvp->nvp_state & NV_PORT_RESET) {
5033		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5034		uint32_t sstatus;
5035
5036		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5037		    "nv_timeout(): port waiting for signature"));
5038
5039		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5040
5041		/*
5042		 * check for link presence.  If the link remains
5043		 * missing for more than 2 seconds, send a remove
5044		 * event and abort signature acquisition.
5045		 */
5046		if (nv_check_link(sstatus) == B_FALSE) {
5047			clock_t e_link_lost = ddi_get_lbolt();
5048
5049			if (nvp->nvp_link_lost_time == 0) {
5050				nvp->nvp_link_lost_time = e_link_lost;
5051			}
5052			if (TICK_TO_SEC(e_link_lost -
5053			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
5054				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5055				    "probe: intermittent link lost while"
5056				    " resetting"));
5057				restart_timeout = B_TRUE;
5058			} else {
5059				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5060				    "link lost during signature acquisition."
5061				    "  Giving up"));
5062				nv_port_state_change(nvp,
5063				    SATA_EVNT_DEVICE_DETACHED|
5064				    SATA_EVNT_LINK_LOST,
5065				    SATA_ADDR_CPORT, 0);
5066				nvp->nvp_state |= NV_PORT_HOTREMOVED;
5067				nvp->nvp_state &= ~NV_PORT_RESET;
5068			}
5069
5070			goto finished;
5071		} else {
5072
5073			nvp->nvp_link_lost_time = 0;
5074		}
5075
5076		nv_read_signature(nvp);
5077
5078		if (nvp->nvp_signature != 0) {
5079			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
5080			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
5081				nvp->nvp_state |= NV_PORT_RESTORE;
5082				nv_port_state_change(nvp,
5083				    SATA_EVNT_DEVICE_RESET,
5084				    SATA_ADDR_DCPORT,
5085				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
5086			}
5087
5088			goto finished;
5089		}
5090
5091		/*
		 * Reset if more than 5 seconds have passed without
5093		 * acquiring a signature.
5094		 */
5095		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
5096			nv_reset(nvp);
5097		}
5098
5099		restart_timeout = B_TRUE;
5100		goto finished;
5101	}
5102
5103
5104	/*
5105	 * not yet NCQ aware
5106	 */
5107	nv_slotp = &(nvp->nvp_slot[0]);
5108
5109	/*
5110	 * this happens early on before nv_slotp is set
5111	 * up OR when a device was unexpectedly removed and
5112	 * there was an active packet.
5113	 */
5114	if (nv_slotp == NULL) {
5115		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5116		    "nv_timeout: nv_slotp == NULL"));
5117
5118		goto finished;
5119	}
5120
5121	/*
5122	 * perform timeout checking and processing only if there is an
5123	 * active packet on the port
5124	 */
5125	if (nv_slotp->nvslot_spkt != NULL)  {
5126		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5127		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5128		uint8_t cmd = satacmd->satacmd_cmd_reg;
5129		uint64_t lba;
5130
5131#if ! defined(__lock_lint) && defined(DEBUG)
5132
5133		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5134		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5135		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5136		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5137		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5138		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5139#endif
5140
5141		/*
5142		 * timeout not needed if there is a polling thread
5143		 */
5144		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5145
5146			goto finished;
5147		}
5148
5149		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5150		    spkt->satapkt_time) {
5151			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5152			    "abort timeout: "
5153			    "nvslot_stime: %ld max ticks till timeout: "
5154			    "%ld cur_time: %ld cmd=%x lba=%d",
5155			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
5156			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
5157
5158			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
5159
5160		} else {
5161			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
5162			    " still in use so restarting timeout"));
5163		}
5164		restart_timeout = B_TRUE;
5165
5166	} else {
5167		/*
5168		 * there was no active packet, so do not re-enable timeout
5169		 */
5170		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5171		    "nv_timeout: no active packet so not re-arming timeout"));
5172	}
5173
5174	finished:
5175
5176	if (restart_timeout == B_TRUE) {
5177		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
5178		    drv_usectohz(NV_ONE_SEC));
5179	} else {
5180		nvp->nvp_timeout_id = 0;
5181	}
5182	mutex_exit(&nvp->nvp_mutex);
5183}
5184
5185
5186/*
5187 * enable or disable the 3 interrupt types the driver is
5188 * interested in: completion, add and remove.
5189 */
5190static void
5191ck804_set_intr(nv_port_t *nvp, int flag)
5192{
5193	nv_ctl_t *nvc = nvp->nvp_ctlp;
5194	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5195	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5196	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5197	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5198	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5199	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5200
5201	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
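		/*
		 * non-blocking variant used where locks must not be
		 * taken, for example the quiesce(9E) path
		 */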
5202		int_en = nv_get8(bar5_hdl,
5203		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5204		int_en &= ~intr_bits[port];
5205		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5206		    int_en);
5207		return;
5208	}
5209
5210	ASSERT(mutex_owned(&nvp->nvp_mutex));
5211
5212	/*
5213	 * controller level lock also required since access to an 8-bit
5214	 * interrupt register is shared between both channels.
5215	 */
5216	mutex_enter(&nvc->nvc_mutex);
5217
5218	if (flag & NV_INTR_CLEAR_ALL) {
5219		NVLOG((NVDBG_INTR, nvc, nvp,
5220		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));
5221
5222		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5223		    (uint8_t *)(nvc->nvc_ck804_int_status));
5224
5225		if (intr_status & clear_all_bits[port]) {
5226
5227			nv_put8(nvc->nvc_bar_hdl[5],
5228			    (uint8_t *)(nvc->nvc_ck804_int_status),
5229			    clear_all_bits[port]);
5230
5231			NVLOG((NVDBG_INTR, nvc, nvp,
5232			    "interrupt bits cleared %x",
5233			    intr_status & clear_all_bits[port]));
5234		}
5235	}
5236
5237	if (flag & NV_INTR_DISABLE) {
5238		NVLOG((NVDBG_INTR, nvc, nvp,
5239		    "ck804_set_intr: NV_INTR_DISABLE"));
5240		int_en = nv_get8(bar5_hdl,
5241		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5242		int_en &= ~intr_bits[port];
5243		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5244		    int_en);
5245	}
5246
5247	if (flag & NV_INTR_ENABLE) {
5248		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
5249		int_en = nv_get8(bar5_hdl,
5250		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5251		int_en |= intr_bits[port];
5252		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5253		    int_en);
5254	}
5255
5256	mutex_exit(&nvc->nvc_mutex);
5257}
5258
5259
5260/*
5261 * enable or disable the 3 interrupts the driver is interested in:
5262 * completion interrupt, hot add, and hot remove interrupt.
5263 */
5264static void
5265mcp5x_set_intr(nv_port_t *nvp, int flag)
5266{
5267	nv_ctl_t *nvc = nvp->nvp_ctlp;
5268	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5269	uint16_t intr_bits =
5270	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5271	uint16_t int_en;
5272
5273	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5274		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5275		int_en &= ~intr_bits;
5276		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5277		return;
5278	}
5279
5280	ASSERT(mutex_owned(&nvp->nvp_mutex));
5281
5282	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5283
5284	if (flag & NV_INTR_CLEAR_ALL) {
5285		NVLOG((NVDBG_INTR, nvc, nvp,
5286		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5287		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5288	}
5289
5290	if (flag & NV_INTR_ENABLE) {
5291		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5292		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5293		int_en |= intr_bits;
5294		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5295	}
5296
5297	if (flag & NV_INTR_DISABLE) {
5298		NVLOG((NVDBG_INTR, nvc, nvp,
5299		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5300		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5301		int_en &= ~intr_bits;
5302		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5303	}
5304}
5305
5306
5307static void
5308nv_resume(nv_port_t *nvp)
5309{
5310	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5311
5312	mutex_enter(&nvp->nvp_mutex);
5313
5314	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5315		mutex_exit(&nvp->nvp_mutex);
5316		return;
5317	}
5318
5319#ifdef SGPIO_SUPPORT
5320	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5321	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5322#endif
5323
5324	/* Enable interrupt */
5325	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5326
5327	/*
	 * power may have been removed from the port and the
5329	 * drive, and/or a drive may have been added or removed.
5330	 * Force a reset which will cause a probe and re-establish
5331	 * any state needed on the drive.
5332	 */
5333	nv_reset(nvp);
5334
5335	mutex_exit(&nvp->nvp_mutex);
5336}
5337
5338
5339static void
5340nv_suspend(nv_port_t *nvp)
5341{
5342	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5343
5344	mutex_enter(&nvp->nvp_mutex);
5345
5346#ifdef SGPIO_SUPPORT
5347	nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5348	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5349#endif
5350
5351	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5352		mutex_exit(&nvp->nvp_mutex);
5353		return;
5354	}
5355
5356	/*
5357	 * Stop the timeout handler.
5358	 * (It will be restarted in nv_reset() during nv_resume().)
5359	 */
5360	if (nvp->nvp_timeout_id) {
5361		(void) untimeout(nvp->nvp_timeout_id);
5362		nvp->nvp_timeout_id = 0;
5363	}
5364
5365	/* Disable interrupt */
5366	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5367	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5368
5369	mutex_exit(&nvp->nvp_mutex);
5370}
5371
5372
5373static void
5374nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5375{
5376	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5377	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5378	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5379	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5380	uchar_t status;
5381	struct sata_cmd_flags flags;
5382
5383	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5384
5385	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5386	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5387	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5388
5389	if (spkt == NULL) {
5390
5391		return;
5392	}
5393
5394	/*
	 * In the error case, implicitly request copy-out of the registers
	 * needed for error handling.
5397	 */
5398	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5399	    nvp->nvp_altstatus);
5400
5401	flags = scmd->satacmd_flags;
5402
5403	if (status & SATA_STATUS_ERR) {
5404		flags.sata_copy_out_lba_low_msb = B_TRUE;
5405		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5406		flags.sata_copy_out_lba_high_msb = B_TRUE;
5407		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5408		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5409		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5410		flags.sata_copy_out_error_reg = B_TRUE;
5411		flags.sata_copy_out_sec_count_msb = B_TRUE;
5412		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5413		scmd->satacmd_status_reg = status;
5414	}
5415
5416	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5417
5418		/*
5419		 * set HOB so that high byte will be read
5420		 */
5421		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5422
5423		/*
5424		 * get the requested high bytes
5425		 */
5426		if (flags.sata_copy_out_sec_count_msb) {
5427			scmd->satacmd_sec_count_msb =
5428			    nv_get8(cmdhdl, nvp->nvp_count);
5429		}
5430
5431		if (flags.sata_copy_out_lba_low_msb) {
5432			scmd->satacmd_lba_low_msb =
5433			    nv_get8(cmdhdl, nvp->nvp_sect);
5434		}
5435
5436		if (flags.sata_copy_out_lba_mid_msb) {
5437			scmd->satacmd_lba_mid_msb =
5438			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5439		}
5440
5441		if (flags.sata_copy_out_lba_high_msb) {
5442			scmd->satacmd_lba_high_msb =
5443			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5444		}
5445	}
5446
5447	/*
5448	 * disable HOB so that low byte is read
5449	 */
5450	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5451
5452	/*
5453	 * get the requested low bytes
5454	 */
5455	if (flags.sata_copy_out_sec_count_lsb) {
5456		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5457	}
5458
5459	if (flags.sata_copy_out_lba_low_lsb) {
5460		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5461	}
5462
5463	if (flags.sata_copy_out_lba_mid_lsb) {
5464		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5465	}
5466
5467	if (flags.sata_copy_out_lba_high_lsb) {
5468		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5469	}
5470
5471	/*
5472	 * get the device register if requested
5473	 */
5474	if (flags.sata_copy_out_device_reg) {
5475		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5476	}
5477
5478	/*
5479	 * get the error register if requested
5480	 */
5481	if (flags.sata_copy_out_error_reg) {
5482		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5483	}
5484}
5485
5486
5487/*
5488 * Hot plug and remove interrupts can occur when the device is reset.  Just
5489 * masking the interrupt doesn't always work well because if a
5490 * different interrupt arrives on the other port, the driver can still
5491 * end up checking the state of the other port and discover the hot
5492 * interrupt flag is set even though it was masked.  Checking for recent
5493 * reset activity and then ignoring turns out to be the easiest way.
5494 */
5495static void
5496nv_report_add_remove(nv_port_t *nvp, int flags)
5497{
5498	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5499	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5500	uint32_t sstatus;
5501	int i;
5502
5503	/*
	 * If a reset occurred within the last second, ignore the
	 * interrupt.  This should be reworked and improved instead of
	 * this somewhat heavy-handed clamping.
5507	 */
5508	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
5509		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
5510		    "ignoring plug interrupt was %dms ago",
5511		    TICK_TO_MSEC(time_diff)));
5512
5513		return;
5514	}
5515
5516	/*
5517	 * wait up to 1ms for sstatus to settle and reflect the true
5518	 * status of the port.  Failure to do so can create confusion
5519	 * in probe, where the incorrect sstatus value can still
5520	 * persist.
5521	 */
5522	for (i = 0; i < 1000; i++) {
5523		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5524
5525		if ((flags == NV_PORT_HOTREMOVED) &&
5526		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5527		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5528			break;
5529		}
5530
5531		if ((flags != NV_PORT_HOTREMOVED) &&
5532		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5533		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5534			break;
5535		}
5536		drv_usecwait(1);
5537	}
5538
5539	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5540	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5541
5542	if (flags == NV_PORT_HOTREMOVED) {
5543		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5544		    "nv_report_add_remove() hot removed"));
5545		nv_port_state_change(nvp,
5546		    SATA_EVNT_DEVICE_DETACHED,
5547		    SATA_ADDR_CPORT, 0);
5548
5549		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5550	} else {
5551		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5552		    "nv_report_add_remove() hot plugged"));
5553		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5554		    SATA_ADDR_CPORT, 0);
5555	}
5556}
5557
5558/*
 * Get request sense data and stuff it into the command's sense buffer.
5560 * Start a request sense command in order to get sense data to insert
5561 * in the sata packet's rqsense buffer.  The command completion
5562 * processing is in nv_intr_pkt_pio.
5563 *
5564 * The sata framework provides a function to allocate and set-up a
 * request sense packet command.  The reasons it is not being used here are:
5566 * a) it cannot be called in an interrupt context and this function is
5567 *    called in an interrupt context.
5568 * b) it allocates DMA resources that are not used here because this is
5569 *    implemented using PIO.
5570 *
5571 * If, in the future, this is changed to use DMA, the sata framework should
5572 * be used to allocate and set-up the error retrieval (request sense)
5573 * command.
5574 */
5575static int
5576nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5577{
5578	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5579	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5580	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5581	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5582
5583	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5584	    "nv_start_rqsense_pio: start"));
5585
5586	/* clear the local request sense buffer before starting the command */
5587	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5588
5589	/* Write the request sense PACKET command */
5590
5591	/* select the drive */
5592	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5593
5594	/* make certain the drive selected */
5595	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5596	    NV_SEC2USEC(5), 0) == B_FALSE) {
5597		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5598		    "nv_start_rqsense_pio: drive select failed"));
5599		return (NV_FAILURE);
5600	}
5601
5602	/* set up the command */
5603	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
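	/* the byte count limit for the transfer goes in the cylinder regs */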
5604	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5605	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5606	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5607	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5608
5609	/* initiate the command by writing the command register last */
5610	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5611
5612	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5613	NV_DELAY_NSEC(400);
5614
5615	/*
5616	 * Wait for the device to indicate that it is ready for the command
5617	 * ATAPI protocol state - HP0: Check_Status_A
5618	 */
5619
5620	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5621	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5622	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5623	    4000000, 0) == B_FALSE) {
5624		if (nv_get8(cmdhdl, nvp->nvp_status) &
5625		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5626			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5627			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5628		} else {
5629			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5630			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5631		}
5632
5633		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5634		nv_complete_io(nvp, spkt, 0);
5635		nv_reset(nvp);
5636
5637		return (NV_FAILURE);
5638	}
5639
5640	/*
5641	 * Put the ATAPI command in the data register
5642	 * ATAPI protocol state - HP1: Send_Packet
5643	 */
5644
5645	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5646	    (ushort_t *)nvp->nvp_data,
5647	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5648
5649	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5650	    "nv_start_rqsense_pio: exiting into HP3"));
5651
5652	return (NV_SUCCESS);
5653}
5654
5655/*
5656 * quiesce(9E) entry point.
5657 *
5658 * This function is called when the system is single-threaded at high
5659 * PIL with preemption disabled. Therefore, this function must not be
5660 * blocked.
5661 *
5662 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5663 * DDI_FAILURE indicates an error condition and should almost never happen.
5664 */
5665static int
5666nv_quiesce(dev_info_t *dip)
5667{
5668	int port, instance = ddi_get_instance(dip);
5669	nv_ctl_t *nvc;
5670
5671	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
5672		return (DDI_FAILURE);
5673
5674	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
5675		nv_port_t *nvp = &(nvc->nvc_port[port]);
5676		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5677		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5678		uint32_t sctrl;
5679
5680		/*
5681		 * Stop the controllers from generating interrupts.
5682		 */
5683		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
5684
5685		/*
5686		 * clear signature registers
5687		 */
5688		nv_put8(cmdhdl, nvp->nvp_sect, 0);
5689		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
5690		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
5691		nv_put8(cmdhdl, nvp->nvp_count, 0);
5692
5693		nvp->nvp_signature = 0;
5694		nvp->nvp_type = 0;
5695		nvp->nvp_state |= NV_PORT_RESET;
5696		nvp->nvp_reset_time = ddi_get_lbolt();
5697		nvp->nvp_link_lost_time = 0;
5698
5699		/*
		 * assert reset in the PHY by writing a 1 to bit 0 of scontrol
5701		 */
5702		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5703
5704		nv_put32(bar5_hdl, nvp->nvp_sctrl,
5705		    sctrl | SCONTROL_DET_COMRESET);
5706
5707		/*
5708		 * wait 1ms
5709		 */
5710		drv_usecwait(1000);
5711
5712		/*
5713		 * de-assert reset in PHY
5714		 */
5715		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
5716	}
5717
5718	return (DDI_SUCCESS);
5719}
5720
5721
5722#ifdef SGPIO_SUPPORT
5723/*
5724 * NVIDIA specific SGPIO LED support
5725 * Please refer to the NVIDIA documentation for additional details
5726 */
5727
5728/*
5729 * nv_sgp_led_init
5730 * Detect SGPIO support.  If present, initialize.
5731 */
5732static void
5733nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
5734{
5735	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
5736	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
5737	nv_sgp_cmn_t *cmn;	/* shared data structure */
5738	char tqname[SGPIO_TQ_NAME_LEN];
5739	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
5740
5741	/*
5742	 * Initialize with appropriately invalid values in case this function
5743	 * exits without initializing SGPIO (for example, there is no SGPIO
5744	 * support).
5745	 */
5746	nvc->nvc_sgp_csr = 0;
5747	nvc->nvc_sgp_cbp = NULL;
5748
5749	/*
5750	 * Only try to initialize SGPIO LED support if this property
5751	 * indicates it should be.
5752	 */
5753	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
5754	    "enable-sgpio-leds", 0) != 1)
5755		return;
5756
5757	/*
5758	 * CK804 can pass the sgpio_detect test even though it does not support
5759	 * SGPIO, so don't even look at a CK804.
5760	 */
5761	if (nvc->nvc_mcp5x_flag != B_TRUE)
5762		return;
5763
5764	/*
5765	 * The NVIDIA SGPIO support can nominally handle 6 drives.
5766	 * However, the current implementation only supports 4 drives.
	 * With two drives per controller, that means only the first two
	 * controllers need to be considered.
5769	 */
5770	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
5771		return;
5772
5773	/* confirm that the SGPIO registers are there */
5774	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
5775		NVLOG((NVDBG_INIT, nvc, NULL,
5776		    "SGPIO registers not detected"));
5777		return;
5778	}
5779
5780	/* save off the SGPIO_CSR I/O address */
5781	nvc->nvc_sgp_csr = csrp;
5782
5783	/* map in Command Block */
5784	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
5785	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
5786
5787	/* initialize the SGPIO h/w */
5788	if (nv_sgp_init(nvc) == NV_FAILURE) {
5789		nv_cmn_err(CE_WARN, nvc, NULL,
5790		    "!Unable to initialize SGPIO");
5791	}
5792
5793	if (nvc->nvc_ctlr_num == 0) {
5794		/*
		 * Controller 0 on the MCP5X/IO55 initializes the SGPIO
		 * and the data that is shared between the controllers.
		 * The clever thing to do would be to let the first controller
		 * that comes up be the one that initializes all this.
		 * However, SGPIO state is not necessarily zeroed between
		 * OS reboots, so there might be old data there.
5801		 */
5802
5803		/* allocate shared space */
5804		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
5805		    KM_SLEEP);
5806		if (cmn == NULL) {
5807			nv_cmn_err(CE_WARN, nvc, NULL,
5808			    "!Failed to allocate shared data");
5809			return;
5810		}
5811
5812		nvc->nvc_sgp_cmn = cmn;
5813
5814		/* initialize the shared data structure */
5815		cmn->nvs_magic = SGPIO_MAGIC;
5816		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
5817		cmn->nvs_connected = 0;
5818		cmn->nvs_activity = 0;
5819
5820		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
5821		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
5822		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
5823
5824		/* put the address in the SGPIO scratch register */
5825#if defined(__amd64)
5826		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
5827#else
5828		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
5829#endif
5830
5831		/* start the activity LED taskq */
5832
5833		/*
		 * The taskq name should be unique, so the low 16 bits of
		 * the current lbolt value are appended to it.
5835		 */
5836		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
5837		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
5838		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
5839		    TASKQ_DEFAULTPRI, 0);
5840		if (cmn->nvs_taskq == NULL) {
5841			cmn->nvs_taskq_delay = 0;
5842			nv_cmn_err(CE_WARN, nvc, NULL,
5843			    "!Failed to start activity LED taskq");
5844		} else {
5845			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
5846			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
5847			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
5848		}
5849
5850	} else if (nvc->nvc_ctlr_num == 1) {
5851		/*
5852		 * Controller 1 confirms that SGPIO has been initialized
5853		 * and, if so, try to get the shared data pointer, otherwise
5854		 * get the shared data pointer when accessing the data.
5855		 */
5856
5857		if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
5858			cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
5859
5860			/*
5861			 * It looks like a pointer, but is it the shared data?
5862			 */
5863			if (cmn->nvs_magic == SGPIO_MAGIC) {
5864				nvc->nvc_sgp_cmn = cmn;
5865
5866				cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
5867			}
5868		}
5869	}
5870}
5871
5872/*
5873 * nv_sgp_detect
5874 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
5875 * report back whether both were readable.
5876 */
5877static int
5878nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
5879    uint32_t *cbpp)
5880{
5881	/* get the SGPIO_CSRP */
5882	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
5883	if (*csrpp == 0) {
5884		return (NV_FAILURE);
5885	}
5886
5887	/* SGPIO_CSRP is good, get the SGPIO_CBP */
5888	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
5889	if (*cbpp == 0) {
5890		return (NV_FAILURE);
5891	}
5892
5893	/* SGPIO_CBP is good, so we must support SGPIO */
5894	return (NV_SUCCESS);
5895}
5896
5897/*
5898 * nv_sgp_init
5899 * Initialize SGPIO.  The process is specified by NVIDIA.
5900 */
5901static int
5902nv_sgp_init(nv_ctl_t *nvc)
5903{
5904	uint32_t status;
5905	int drive_count;
5906
5907	/*
5908	 * If the SGPIO status is SGPIO_STATE_RESET, the logic has been
5909	 * reset and needs to be initialized.
5910	 */
5911	status = nv_sgp_csr_read(nvc);
5912	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
5913		if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5914			/* reset and try again */
5915			nv_sgp_reset(nvc);
5916			if (nv_sgp_init_cmd(nvc) == NV_FAILURE) {
5917				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5918				    "SGPIO init failed"));
5919				return (NV_FAILURE);
5920			}
5921		}
5922	}
5923
5924	/*
5925	 * NVIDIA recommends reading the supported drive count even
5926	 * though they also indicate that it is 4 at this time.
5927	 */
5928	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
5929	if (drive_count != SGPIO_DRV_CNT_VALUE) {
5930		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5931		    "SGPIO reported undocumented drive count - %d",
5932		    drive_count));
5933	}
5934
5935	NVLOG((NVDBG_INIT, nvc, NULL,
5936	    "initialized ctlr: %d csr: 0x%08x",
5937	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
5938
5939	return (NV_SUCCESS);
5940}
5941
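/*
 * nv_sgp_reset
 * Send the SGPIO RESET command and check the resulting CSR status to
 * confirm that the command completed successfully.
 */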
5942static void
5943nv_sgp_reset(nv_ctl_t *nvc)
5944{
5945	uint32_t cmd;
5946	uint32_t status;
5947
5948	cmd = SGPIO_CMD_RESET;
5949	nv_sgp_csr_write(nvc, cmd);
5950
5951	status = nv_sgp_csr_read(nvc);
5952
5953	if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) {
5954		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5955		    "SGPIO reset failed: CSR - 0x%x", status));
5956	}
5957}
5958
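/*
 * nv_sgp_init_cmd
 * Issue the READ PARAMETERS command and poll the CSR until the sequence
 * number changes, an error is reported, or NV_SGP_CMD_TIMEOUT expires.
 */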
5959static int
5960nv_sgp_init_cmd(nv_ctl_t *nvc)
5961{
5962	int seq;
5963	hrtime_t start, end;
5964	uint32_t status;
5965	uint32_t cmd;
5966
5967	/* get the old sequence value */
5968	status = nv_sgp_csr_read(nvc);
5969	seq = SGPIO_CSR_SEQ(status);
5970
5971	/* check the state since we have the info anyway */
5972	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
5973		NVLOG((NVDBG_ALWAYS, nvc, NULL,
5974		    "SGPIO init_cmd: state not operational"));
5975	}
5976
5977	/* issue command */
5978	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
5979	nv_sgp_csr_write(nvc, cmd);
5980
5981	DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
5982
5983	/* poll for completion */
5984	start = gethrtime();
5985	end = start + NV_SGP_CMD_TIMEOUT;
5986	for (;;) {
5987		status = nv_sgp_csr_read(nvc);
5988
5989		/* break on error */
5990		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
5991			break;
5992
5993		/* break on command completion (seq changed) */
5994		if (SGPIO_CSR_SEQ(status) != seq) {
5995			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) {
5996				NVLOG((NVDBG_ALWAYS, nvc, NULL,
5997				    "Seq changed but command still active"));
5998			}
5999
6000			break;
6001		}
6002
6003		/* Wait 400 ns and try again */
6004		NV_DELAY_NSEC(400);
6005
6006		if (gethrtime() > end)
6007			break;
6008	}
6009
6010	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6011		return (NV_SUCCESS);
6012
6013	return (NV_FAILURE);
6014}
6015
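/*
 * nv_sgp_check_set_cmn
 * Pick up the shared data pointer from the SGPIO scratch register,
 * validate it using the magic value, mark this controller as in use,
 * and cache the pointer in nvc_sgp_cmn.
 */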
6016static int
6017nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6018{
6019	nv_sgp_cmn_t *cmn;
6020
6021	if (nvc->nvc_sgp_cbp == NULL)
6022		return (NV_FAILURE);
6023
6024	/* check to see if Scratch Register is set */
6025	if (nvc->nvc_sgp_cbp->sgpio_sr != 0) {
6026		nvc->nvc_sgp_cmn =
6027		    (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr;
6028
6029		if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC)
6030			return (NV_FAILURE);
6031
6032		cmn = nvc->nvc_sgp_cmn;
6033
6034		mutex_enter(&cmn->nvs_slock);
6035		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6036		mutex_exit(&cmn->nvs_slock);
6037
6038		return (NV_SUCCESS);
6039	}
6040
6041	return (NV_FAILURE);
6042}
6043
6044/*
6045 * nv_sgp_csr_read
6046 * This is just a 32-bit read from the I/O port whose address was obtained
6047 * from PCI config space.
6048 *
6049 * XXX It was advised to use the in[bwl] functions for this, even though
6050 * they are obsolete interfaces.
6051 */
6052static int
6053nv_sgp_csr_read(nv_ctl_t *nvc)
6054{
6055	return (inl(nvc->nvc_sgp_csr));
6056}
6057
6058/*
6059 * nv_sgp_csr_write
6060 * This is just a 32-bit I/O port write.  The port number was obtained from
6061 * the PCI config space.
6062 *
6063 * XXX It was advised to use the out[bwl] functions for this, even though
6064 * they are obsolete interfaces.
6065 */
6066static void
6067nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6068{
6069	outl(nvc->nvc_sgp_csr, val);
6070}
6071
6072/*
6073 * nv_sgp_write_data
6074 * Cause SGPIO to send Command Block data
6075 */
6076static int
6077nv_sgp_write_data(nv_ctl_t *nvc)
6078{
6079	hrtime_t start, end;
6080	uint32_t status;
6081	uint32_t cmd;
6082
6083	/* issue command */
6084	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6085	nv_sgp_csr_write(nvc, cmd);
6086
6087	/* poll for completion */
6088	start = gethrtime();
6089	end = start + NV_SGP_CMD_TIMEOUT;
6090	for (;;) {
6091		status = nv_sgp_csr_read(nvc);
6092
6093		/* break on error completion */
6094		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6095			break;
6096
6097		/* break on successful completion */
6098		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6099			break;
6100
6101		/* Wait 400 ns and try again */
6102		NV_DELAY_NSEC(400);
6103
6104		if (gethrtime() > end)
6105			break;
6106	}
6107
6108	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6109		return (NV_SUCCESS);
6110
6111	return (NV_FAILURE);
6112}
6113
6114/*
6115 * nv_sgp_activity_led_ctl
6116 * This is run from a taskq.  It wakes up at a fixed interval and checks to
6117 * see if any of the activity LEDs need to be changed.
6118 */
6119static void
6120nv_sgp_activity_led_ctl(void *arg)
6121{
6122	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6123	nv_sgp_cmn_t *cmn;
6124	volatile nv_sgp_cb_t *cbp;
6125	clock_t ticks;
6126	uint8_t drv_leds;
6127	uint32_t old_leds;
6128	uint32_t new_led_state;
6129	int i;
6130
6131	cmn = nvc->nvc_sgp_cmn;
6132	cbp = nvc->nvc_sgp_cbp;
6133
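	/*
	 * Loop until nvs_taskq_delay is set to 0 by nv_sgp_cleanup(), at
	 * which point ticks becomes 0 and the loop exits.
	 */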
6134	do {
6135		/* save off the old state of all of the LEDs */
6136		old_leds = cbp->sgpio0_tr;
6137
6138		DTRACE_PROBE3(sgpio__activity__state,
6139		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6140		    int, old_leds);
6141
6142		new_led_state = 0;
6143
6144		/* for each drive */
6145		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6146
6147			/* get the current state of the LEDs for the drive */
6148			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6149
6150			if ((cmn->nvs_connected & (1 << i)) == 0) {
6151				/* if not connected, turn off activity */
6152				drv_leds &= ~TR_ACTIVE_MASK;
6153				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6154
6155				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6156				new_led_state |=
6157				    SGPIO0_TR_DRV_SET(drv_leds, i);
6158
6159				continue;
6160			}
6161
6162			if ((cmn->nvs_activity & (1 << i)) == 0) {
6163				/* connected, but not active */
6164				drv_leds &= ~TR_ACTIVE_MASK;
6165				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6166
6167				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6168				new_led_state |=
6169				    SGPIO0_TR_DRV_SET(drv_leds, i);
6170
6171				continue;
6172			}
6173
6174			/* connected and active */
6175			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6176				/* was enabled, so disable */
6177				drv_leds &= ~TR_ACTIVE_MASK;
6178				drv_leds |=
6179				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6180
6181				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6182				new_led_state |=
6183				    SGPIO0_TR_DRV_SET(drv_leds, i);
6184			} else {
6185				/* was disabled, so enable */
6186				drv_leds &= ~TR_ACTIVE_MASK;
6187				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6188
6189				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6190				new_led_state |=
6191				    SGPIO0_TR_DRV_SET(drv_leds, i);
6192			}
6193
6194			/*
6195			 * Clear the activity bit.  If there is drive
6196			 * activity again within the loop interval (now
6197			 * 1/16 second), nvs_activity will be set again and
6198			 * the "connected and active" case above will cause
6199			 * the LED to blink off and on at the loop interval
6200			 * rate.  The rate may be increased (interval
6201			 * shortened) as long as the interval does not drop
6202			 * below 1/30 second.
6203			 */
6204			mutex_enter(&cmn->nvs_slock);
6205			cmn->nvs_activity &= ~(1 << i);
6206			mutex_exit(&cmn->nvs_slock);
6207		}
6208
6209		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6210
6211		/* write out LED values */
6212
6213		mutex_enter(&cmn->nvs_slock);
6214		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6215		cbp->sgpio0_tr |= new_led_state;
6216		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6217		mutex_exit(&cmn->nvs_slock);
6218
6219		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6220			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6221			    "nv_sgp_write_data failure updating active LED"));
6222		}
6223
6224		/* now rest for the interval */
6225		mutex_enter(&cmn->nvs_tlock);
6226		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6227		if (ticks > 0)
6228			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6229			    ddi_get_lbolt() + ticks);
6230		mutex_exit(&cmn->nvs_tlock);
6231	} while (ticks > 0);
6232}
6233
6234/*
6235 * nv_sgp_drive_connect
6236 * Set the flag used to indicate that the drive is attached to the HBA.
6237 * Used to let the taskq know that it should turn the Activity LED on.
6238 */
6239static void
6240nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6241{
6242	nv_sgp_cmn_t *cmn;
6243
6244	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6245		return;
6246	cmn = nvc->nvc_sgp_cmn;
6247
6248	mutex_enter(&cmn->nvs_slock);
6249	cmn->nvs_connected |= (1 << drive);
6250	mutex_exit(&cmn->nvs_slock);
6251}
6252
6253/*
6254 * nv_sgp_drive_disconnect
6255 * Clears the flag used to indicate that the drive is attached to the
6256 * HBA.  Used to let the taskq know that it should turn the
6257 * Activity LED off.  The flag that indicates that the drive is in use is
6258 * also cleared.
6259 */
6260static void
6261nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6262{
6263	nv_sgp_cmn_t *cmn;
6264
6265	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6266		return;
6267	cmn = nvc->nvc_sgp_cmn;
6268
6269	mutex_enter(&cmn->nvs_slock);
6270	cmn->nvs_connected &= ~(1 << drive);
6271	cmn->nvs_activity &= ~(1 << drive);
6272	mutex_exit(&cmn->nvs_slock);
6273}
6274
6275/*
6276 * nv_sgp_drive_active
6277 * Sets the flag used to indicate that the drive has been accessed and the
6278 * LED should be flicked off, then on.  It is cleared at a fixed time
6279 * interval by the LED taskq and set by the sata command start.
6280 */
6281static void
6282nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6283{
6284	nv_sgp_cmn_t *cmn;
6285
6286	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6287		return;
6288	cmn = nvc->nvc_sgp_cmn;
6289
6290	DTRACE_PROBE1(sgpio__active, int, drive);
6291
6292	mutex_enter(&cmn->nvs_slock);
6293	cmn->nvs_connected |= (1 << drive);
6294	cmn->nvs_activity |= (1 << drive);
6295	mutex_exit(&cmn->nvs_slock);
6296}
6297
6298
6299/*
6300 * nv_sgp_locate
6301 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6302 * maintained in the SGPIO Command Block.
6303 */
6304static void
6305nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6306{
6307	uint8_t leds;
6308	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6309	nv_sgp_cmn_t *cmn;
6310
6311	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6312		return;
6313	cmn = nvc->nvc_sgp_cmn;
6314
6315	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6316		return;
6317
6318	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6319
6320	mutex_enter(&cmn->nvs_slock);
6321
6322	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6323
6324	leds &= ~TR_LOCATE_MASK;
6325	leds |= TR_LOCATE_SET(value);
6326
6327	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6328	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6329
6330	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6331
6332	mutex_exit(&cmn->nvs_slock);
6333
6334	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6335		nv_cmn_err(CE_WARN, nvc, NULL,
6336		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
6337	}
6338}
6339
6340/*
6341 * nv_sgp_error
6342 * Turns the Error/Failure LED off or on for a particular drive.  State is
6343 * maintained in the SGPIO Command Block.
6344 */
6345static void
6346nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
6347{
6348	uint8_t leds;
6349	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6350	nv_sgp_cmn_t *cmn;
6351
6352	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6353		return;
6354	cmn = nvc->nvc_sgp_cmn;
6355
6356	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6357		return;
6358
6359	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
6360
6361	mutex_enter(&cmn->nvs_slock);
6362
6363	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6364
6365	leds &= ~TR_ERROR_MASK;
6366	leds |= TR_ERROR_SET(value);
6367
6368	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
6369	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
6370
6371	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6372
6373	mutex_exit(&cmn->nvs_slock);
6374
6375	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6376		nv_cmn_err(CE_WARN, nvc, NULL,
6377		    "!nv_sgp_write_data failure updating Fail/Error LED");
6378	}
6379}
6380
6381static void
6382nv_sgp_cleanup(nv_ctl_t *nvc)
6383{
6384	int drive;
6385	uint8_t drv_leds;
6386	uint32_t led_state;
6387	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6388	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6389	extern void psm_unmap_phys(caddr_t, size_t);
6390
6391	/*
6392	 * If the SGPIO command block isn't mapped or the shared data
6393	 * structure isn't present in this instance, there isn't much that
6394	 * can be cleaned up.
6395	 */
6396	if ((cb == NULL) || (cmn == NULL))
6397		return;
6398
6399	/* turn off activity LEDs for this controller */
6400	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6401
6402	/* get the existing LED state */
6403	led_state = cb->sgpio0_tr;
6404
6405	/* turn off port 0 */
6406	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
6407	led_state &= SGPIO0_TR_DRV_CLR(drive);
6408	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6409
6410	/* turn off port 1 */
6411	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
6412	led_state &= SGPIO0_TR_DRV_CLR(drive);
6413	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
6414
6415	/* set the new led state, which should turn off this ctrl's LEDs */
	cb->sgpio0_tr = led_state;
6416	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6417	(void) nv_sgp_write_data(nvc);
6418
6419	/* clear the controller's in use bit */
6420	mutex_enter(&cmn->nvs_slock);
6421	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
6422	mutex_exit(&cmn->nvs_slock);
6423
6424	if (cmn->nvs_in_use == 0) {
6425		/* if all "in use" bits cleared, take everything down */
6426
6427		if (cmn->nvs_taskq != NULL) {
6428			/* allow activity taskq to exit */
6429			cmn->nvs_taskq_delay = 0;
6430			cv_broadcast(&cmn->nvs_cv);
6431
6432			/* then destroy it */
6433			ddi_taskq_destroy(cmn->nvs_taskq);
6434		}
6435
6436		/* turn off all of the LEDs */
6437		cb->sgpio0_tr = 0;
6438		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6439		(void) nv_sgp_write_data(nvc);
6440
6441		cb->sgpio_sr = NULL;
6442
6443		/* free resources */
6444		cv_destroy(&cmn->nvs_cv);
6445		mutex_destroy(&cmn->nvs_tlock);
6446		mutex_destroy(&cmn->nvs_slock);
6447
6448		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
6449	}
6450
6451	nvc->nvc_sgp_cmn = NULL;
6452
6453	/* unmap the SGPIO Command Block */
6454	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
6455}
6456#endif	/* SGPIO_SUPPORT */
6457