nv_sata.c revision 6539:bd91313a1d3d
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29/*
30 *
31 * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
32 *
33 * NCQ
34 * ---
35 *
36 * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
37 * disabled and is likely to be revisited in the future.
38 *
39 *
40 * Power Management
41 * ----------------
42 *
43 * Normally power management would be responsible for ensuring the device
44 * is quiescent and then changing power states to the device, such as
45 * powering down parts or all of the device.  mcp55/ck804 is unique in
46 * that it is only available as part of a larger southbridge chipset, so
47 * removing power to the device isn't possible.  Switches to control
48 * power management states D0/D3 in the PCI configuration space appear to
49 * be supported, but changes to these states are apparently ignored.
50 * The only further PM that the driver _could_ do is shut down the PHY,
51 * but in order to deliver the first rev of the driver sooner rather than
52 * later, that will be deferred until some future phase.
53 *
54 * Since the driver currently will not directly change any power state to
55 * the device, no power() entry point will be required.  However, it is
56 * possible that in ACPI power state S3, aka suspend to RAM, that power
57 * can be removed to the device, and the driver cannot rely on BIOS to
58 * have reset any state.  For the time being, there are no known
59 * non-default configurations that need to be programmed.  This judgement
60 * is based on the port of the legacy ata driver not having any such
61 * functionality and based on conversations with the PM team.  If such a
62 * restoration is later deemed necessary it can be incorporated into the
63 * DDI_RESUME processing.
64 *
65 */
66
67#include <sys/scsi/scsi.h>
68#include <sys/pci.h>
69#include <sys/byteorder.h>
70#include <sys/sata/sata_hba.h>
71#include <sys/sata/adapters/nv_sata/nv_sata.h>
72#include <sys/disp.h>
73#include <sys/note.h>
74#include <sys/promif.h>
75
76
77/*
78 * Function prototypes for driver entry points
79 */
80static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
81static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
82static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
83    void *arg, void **result);
84
85/*
86 * Function prototypes for entry points from sata service module
87 * These functions are distinguished from other local functions
88 * by the prefix "nv_sata_"
89 */
90static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
91static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
92static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
93static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
94static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
95
96/*
97 * Local function prototypes
98 */
99static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
100static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
101static int nv_add_legacy_intrs(nv_ctl_t *nvc);
102#ifdef NV_MSI_SUPPORTED
103static int nv_add_msi_intrs(nv_ctl_t *nvc);
104#endif
105static void nv_rem_intrs(nv_ctl_t *nvc);
106static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
107static int nv_start_nodata(nv_port_t *nvp, int slot);
108static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
109static int nv_start_pio_in(nv_port_t *nvp, int slot);
110static int nv_start_pio_out(nv_port_t *nvp, int slot);
111static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
112static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
113static int nv_start_dma(nv_port_t *nvp, int slot);
114static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
115static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
116static void nv_uninit_ctl(nv_ctl_t *nvc);
117static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
118static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
119static void nv_uninit_port(nv_port_t *nvp);
120static int nv_init_port(nv_port_t *nvp);
121static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
122static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
123#ifdef NCQ
124static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
125#endif
126static void nv_start_dma_engine(nv_port_t *nvp, int slot);
127static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
128    int state);
129static boolean_t nv_check_link(uint32_t sstatus);
130static void nv_common_reg_init(nv_ctl_t *nvc);
131static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
132static void nv_reset(nv_port_t *nvp);
133static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
134static void nv_timeout(void *);
135static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
136static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
137static void nv_read_signature(nv_port_t *nvp);
138static void mcp55_set_intr(nv_port_t *nvp, int flag);
139static void mcp04_set_intr(nv_port_t *nvp, int flag);
140static void nv_resume(nv_port_t *nvp);
141static void nv_suspend(nv_port_t *nvp);
142static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
143static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
144static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
145    sata_pkt_t *spkt);
146static void nv_report_add_remove(nv_port_t *nvp, int flags);
147static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
148static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
149    uchar_t failure_onbits2, uchar_t failure_offbits2,
150    uchar_t failure_onbits3, uchar_t failure_offbits3,
151    uint_t timeout_usec, int type_wait);
152static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
153    uint_t timeout_usec, int type_wait);
154
155
156/*
157 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
158 * Verify whether it is needed if ported to another ISA.
159 */
160static ddi_dma_attr_t buffer_dma_attr = {
161	DMA_ATTR_V0,		/* dma_attr_version */
162	0,			/* dma_attr_addr_lo: lowest bus address */
163	0xffffffffull,		/* dma_attr_addr_hi: */
164	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e. for one cookie */
165	4,			/* dma_attr_align */
166	1,			/* dma_attr_burstsizes. */
167	1,			/* dma_attr_minxfer */
168	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
169	0xffffffffull,		/* dma_attr_seg */
170	NV_DMA_NSEGS,		/* dma_attr_sgllen */
171	512,			/* dma_attr_granular */
172	0,			/* dma_attr_flags */
173};
174
175
176/*
177 * DMA attributes for PRD tables
178 */
179ddi_dma_attr_t nv_prd_dma_attr = {
180	DMA_ATTR_V0,		/* dma_attr_version */
181	0,			/* dma_attr_addr_lo */
182	0xffffffffull,		/* dma_attr_addr_hi */
183	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
184	4,			/* dma_attr_align */
185	1,			/* dma_attr_burstsizes */
186	1,			/* dma_attr_minxfer */
187	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
188	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
189	1,			/* dma_attr_sgllen */
190	1,			/* dma_attr_granular */
191	0			/* dma_attr_flags */
192};
193
194/*
195 * Device access attributes
196 */
197static ddi_device_acc_attr_t accattr = {
198    DDI_DEVICE_ATTR_V0,
199    DDI_STRUCTURE_LE_ACC,
200    DDI_STRICTORDER_ACC
201};
202
203
204static struct dev_ops nv_dev_ops = {
205	DEVO_REV,		/* devo_rev */
206	0,			/* refcnt  */
207	nv_getinfo,		/* info */
208	nulldev,		/* identify */
209	nulldev,		/* probe */
210	nv_attach,		/* attach */
211	nv_detach,		/* detach */
212	nodev,			/* no reset */
213	(struct cb_ops *)0,	/* driver operations */
214	NULL,			/* bus operations */
215	NULL			/* power */
216};
217
218static sata_tran_hotplug_ops_t nv_hotplug_ops;
219
220extern struct mod_ops mod_driverops;
221
222static  struct modldrv modldrv = {
223	&mod_driverops,	/* driverops */
224	"Nvidia ck804/mcp55 HBA v%I%",
225	&nv_dev_ops,	/* driver ops */
226};
227
228static  struct modlinkage modlinkage = {
229	MODREV_1,
230	&modldrv,
231	NULL
232};
233
234
235/*
236 * wait between checks of reg status
237 */
238int nv_usec_delay = NV_WAIT_REG_CHECK;
239
240/*
241 * The following is needed for nv_vcmn_err()
242 */
243static kmutex_t nv_log_mutex; /* protects nv_log_buf */
244static char nv_log_buf[NV_STRING_512];
245int nv_debug_flags = NVDBG_ALWAYS;
246int nv_log_to_console = B_FALSE;
247
248int nv_log_delay = 0;
249int nv_prom_print = B_FALSE;
250
251/*
252 * for debugging
253 */
254#ifdef DEBUG
255int ncq_commands = 0;
256int non_ncq_commands = 0;
257#endif
258
259/*
260 * Opaque state pointer to be initialized by ddi_soft_state_init()
261 */
262static void *nv_statep	= NULL;
263
264
265static sata_tran_hotplug_ops_t nv_hotplug_ops = {
266	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
267	nv_sata_activate,	/* activate port. cfgadm -c connect */
268	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
269};
270
271
272/*
273 *  nv module initialization
274 */
275int
276_init(void)
277{
278	int	error;
279
280	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
281
282	if (error != 0) {
283
284		return (error);
285	}
286
287	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
288
289	if ((error = sata_hba_init(&modlinkage)) != 0) {
290		ddi_soft_state_fini(&nv_statep);
291		mutex_destroy(&nv_log_mutex);
292
293		return (error);
294	}
295
296	error = mod_install(&modlinkage);
297	if (error != 0) {
298		sata_hba_fini(&modlinkage);
299		ddi_soft_state_fini(&nv_statep);
300		mutex_destroy(&nv_log_mutex);
301
302		return (error);
303	}
304
305	return (error);
306}
307
308
309/*
310 * nv module uninitialize
311 */
312int
313_fini(void)
314{
315	int	error;
316
317	error = mod_remove(&modlinkage);
318
319	if (error != 0) {
320		return (error);
321	}
322
323	/*
324	 * remove the resources allocated in _init()
325	 */
326	mutex_destroy(&nv_log_mutex);
327	sata_hba_fini(&modlinkage);
328	ddi_soft_state_fini(&nv_statep);
329
330	return (error);
331}
332
333
334/*
335 * nv _info entry point
336 */
337int
338_info(struct modinfo *modinfop)
339{
340	return (mod_info(&modlinkage, modinfop));
341}
342
343
344/*
345 * these wrappers for ddi_{get,put}{8,16,32} are for observability
346 * with dtrace
347 */
348#ifdef DEBUG
349
350static void
351nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
352{
353	ddi_put8(handle, dev_addr, value);
354}
355
356static void
357nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
358{
359	ddi_put32(handle, dev_addr, value);
360}
361
362static uint32_t
363nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
364{
365	return (ddi_get32(handle, dev_addr));
366}
367
368static void
369nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
370{
371	ddi_put16(handle, dev_addr, value);
372}
373
374static uint16_t
375nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
376{
377	return (ddi_get16(handle, dev_addr));
378}
379
380static uint8_t
381nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
382{
383	return (ddi_get8(handle, dev_addr));
384}
385
386#else
387
388#define	nv_put8 ddi_put8
389#define	nv_put32 ddi_put32
390#define	nv_get32 ddi_get32
391#define	nv_put16 ddi_put16
392#define	nv_get16 ddi_get16
393#define	nv_get8 ddi_get8
394
395#endif
396
397
398/*
399 * Driver attach
400 */
401static int
402nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
403{
404	int status, attach_state, intr_types, bar, i, command;
405	int inst = ddi_get_instance(dip);
406	ddi_acc_handle_t pci_conf_handle;
407	nv_ctl_t *nvc;
408	uint8_t subclass;
409	uint32_t reg32;
410
411	switch (cmd) {
412
413	case DDI_ATTACH:
414
415		NVLOG((NVDBG_INIT, NULL, NULL,
416		    "nv_attach(): DDI_ATTACH inst %d", inst));
417
418		attach_state = ATTACH_PROGRESS_NONE;
419
420		status = ddi_soft_state_zalloc(nv_statep, inst);
421
422		if (status != DDI_SUCCESS) {
423			break;
424		}
425
426		nvc = ddi_get_soft_state(nv_statep, inst);
427
428		nvc->nvc_dip = dip;
429
430		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
431
432		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
433			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
434			    PCI_CONF_REVID);
435			NVLOG((NVDBG_INIT, NULL, NULL,
436			    "inst %d: silicon revid is %x nv_debug_flags=%x",
437			    inst, nvc->nvc_revid, nv_debug_flags));
438		} else {
439			break;
440		}
441
442		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
443
444		/*
445		 * If a device is attached after a suspend/resume, sometimes
446		 * the command register is zero, as it might not be set by
447		 * BIOS or a parent.  Set it again here.
448		 */
449		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
450
451		if (command == 0) {
452			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
453			    " register", inst);
454			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
455			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
456		}
457
458		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
459
460		if (subclass & PCI_MASS_RAID) {
461			cmn_err(CE_WARN,
462			    "attach failed: RAID mode not supported");
463			break;
464		}
465
466		/*
467		 * the 6 bars of the controller are:
468		 * 0: port 0 task file
469		 * 1: port 0 status
470		 * 2: port 1 task file
471		 * 3: port 1 status
472		 * 4: bus master for both ports
473		 * 5: extended registers for SATA features
474		 */
475		for (bar = 0; bar < 6; bar++) {
476			status = ddi_regs_map_setup(dip, bar + 1,
477			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
478			    &nvc->nvc_bar_hdl[bar]);
479
480			if (status != DDI_SUCCESS) {
481				NVLOG((NVDBG_INIT, nvc, NULL,
482				    "ddi_regs_map_setup failure for bar"
483				    " %d status = %d", bar, status));
484				break;
485			}
486		}
487
488		attach_state |= ATTACH_PROGRESS_BARS;
489
490		/*
491		 * initialize controller and driver core
492		 */
493		status = nv_init_ctl(nvc, pci_conf_handle);
494
495		if (status == NV_FAILURE) {
496			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
497
498			break;
499		}
500
501		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
502
503		/*
504		 * initialize mutexes
505		 */
506		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
507		    DDI_INTR_PRI(nvc->nvc_intr_pri));
508
509		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
510
511		/*
512		 * get supported interrupt types
513		 */
514		if (ddi_intr_get_supported_types(dip, &intr_types) !=
515		    DDI_SUCCESS) {
516			nv_cmn_err(CE_WARN, nvc, NULL,
517			    "!ddi_intr_get_supported_types failed");
518			NVLOG((NVDBG_INIT, nvc, NULL,
519			    "interrupt supported types failed"));
520
521			break;
522		}
523
524		NVLOG((NVDBG_INIT, nvc, NULL,
525		    "ddi_intr_get_supported_types() returned: 0x%x",
526		    intr_types));
527
528#ifdef NV_MSI_SUPPORTED
529		if (intr_types & DDI_INTR_TYPE_MSI) {
530			NVLOG((NVDBG_INIT, nvc, NULL,
531			    "using MSI interrupt type"));
532
533			/*
534			 * Try MSI first, but fall back to legacy if MSI
535			 * attach fails
536			 */
537			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
538				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
539				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
540				NVLOG((NVDBG_INIT, nvc, NULL,
541				    "MSI interrupt setup done"));
542			} else {
543				nv_cmn_err(CE_CONT, nvc, NULL,
544				    "!MSI registration failed "
545				    "will try Legacy interrupts");
546			}
547		}
548#endif
549
550		/*
551		 * Either the MSI interrupt setup has failed or only
552		 * the fixed interrupts are available on the system.
553		 */
554		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
555		    (intr_types & DDI_INTR_TYPE_FIXED)) {
556
557			NVLOG((NVDBG_INIT, nvc, NULL,
558			    "using Legacy interrupt type"));
559
560			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
561				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
562				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
563				NVLOG((NVDBG_INIT, nvc, NULL,
564				    "Legacy interrupt setup done"));
565			} else {
566				nv_cmn_err(CE_WARN, nvc, NULL,
567				    "!legacy interrupt setup failed");
568				NVLOG((NVDBG_INIT, nvc, NULL,
569				    "legacy interrupt setup failed"));
570				break;
571			}
572		}
573
574		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
575			NVLOG((NVDBG_INIT, nvc, NULL,
576			    "no interrupts registered"));
577			break;
578		}
579
580		/*
581		 * attach to sata module
582		 */
583		if (sata_hba_attach(nvc->nvc_dip,
584		    &nvc->nvc_sata_hba_tran,
585		    DDI_ATTACH) != DDI_SUCCESS) {
586			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
587
588			break;
589		}
590
591		pci_config_teardown(&pci_conf_handle);
592
593		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
594
595		return (DDI_SUCCESS);
596
597	case DDI_RESUME:
598
599		nvc = ddi_get_soft_state(nv_statep, inst);
600
601		NVLOG((NVDBG_INIT, nvc, NULL,
602		    "nv_attach(): DDI_RESUME inst %d", inst));
603
604		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
605			return (DDI_FAILURE);
606		}
607
608		/*
609		 * If a device is attached after a suspend/resume, sometimes
610		 * the command register is zero, as it might not be set by
611		 * BIOS or a parent.  Set it again here.
612		 */
613		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
614
615		if (command == 0) {
616			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
617			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
618		}
619
620		/*
621		 * Need to set bit 2 to 1 at config offset 0x50
622		 * to enable access to the bar5 registers.
623		 */
624		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
625
626		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
627			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
628			    reg32 | NV_BAR5_SPACE_EN);
629		}
630
631		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
632
633		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
634			nv_resume(&(nvc->nvc_port[i]));
635		}
636
637		pci_config_teardown(&pci_conf_handle);
638
639		return (DDI_SUCCESS);
640
641	default:
642		return (DDI_FAILURE);
643	}
644
645
646	/*
647	 * DDI_ATTACH failure path starts here
648	 */
649
650	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
651		nv_rem_intrs(nvc);
652	}
653
654	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
655		/*
656		 * Remove timers
657		 */
658		int port = 0;
659		nv_port_t *nvp;
660
661		for (; port < NV_MAX_PORTS(nvc); port++) {
662			nvp = &(nvc->nvc_port[port]);
663			if (nvp->nvp_timeout_id != 0) {
664				(void) untimeout(nvp->nvp_timeout_id);
665			}
666		}
667	}
668
669	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
670		mutex_destroy(&nvc->nvc_mutex);
671	}
672
673	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
674		nv_uninit_ctl(nvc);
675	}
676
677	if (attach_state & ATTACH_PROGRESS_BARS) {
678		while (--bar >= 0) {
679			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
680		}
681	}
682
683	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
684		ddi_soft_state_free(nv_statep, inst);
685	}
686
687	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
688		pci_config_teardown(&pci_conf_handle);
689	}
690
691	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
692
693	return (DDI_FAILURE);
694}
695
696
697static int
698nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
699{
700	int i, port, inst = ddi_get_instance(dip);
701	nv_ctl_t *nvc;
702	nv_port_t *nvp;
703
704	nvc = ddi_get_soft_state(nv_statep, inst);
705
706	switch (cmd) {
707
708	case DDI_DETACH:
709
710		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
711
712		/*
713		 * Remove interrupts
714		 */
715		nv_rem_intrs(nvc);
716
717		/*
718		 * Remove timers
719		 */
720		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
721			nvp = &(nvc->nvc_port[port]);
722			if (nvp->nvp_timeout_id != 0) {
723				(void) untimeout(nvp->nvp_timeout_id);
724			}
725		}
726
727		/*
728		 * Remove maps
729		 */
730		for (i = 0; i < 6; i++) {
731			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
732		}
733
734		/*
735		 * Destroy mutexes
736		 */
737		mutex_destroy(&nvc->nvc_mutex);
738
739		/*
740		 * Uninitialize the controller
741		 */
742		nv_uninit_ctl(nvc);
743
744		/*
745		 * unregister from the sata module
746		 */
747		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
748
749		/*
750		 * Free soft state
751		 */
752		ddi_soft_state_free(nv_statep, inst);
753
754		return (DDI_SUCCESS);
755
756	case DDI_SUSPEND:
757		/*
758		 * The PM functions for suspend and resume are incomplete
759		 * and need additional work.  They may or may not work in
760		 * the current state.
761		 */
762		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
763
764		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
765			nv_suspend(&(nvc->nvc_port[i]));
766		}
767
768		nvc->nvc_state |= NV_CTRL_SUSPEND;
769
770		return (DDI_SUCCESS);
771
772	default:
773		return (DDI_FAILURE);
774	}
775}
776
777
778/*ARGSUSED*/
779static int
780nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
781{
782	nv_ctl_t *nvc;
783	int instance;
784	dev_t dev;
785
786	dev = (dev_t)arg;
787	instance = getminor(dev);
788
789	switch (infocmd) {
790	case DDI_INFO_DEVT2DEVINFO:
791		nvc = ddi_get_soft_state(nv_statep,  instance);
792		if (nvc != NULL) {
793			*result = nvc->nvc_dip;
794			return (DDI_SUCCESS);
795		} else {
796			*result = NULL;
797			return (DDI_FAILURE);
798		}
799	case DDI_INFO_DEVT2INSTANCE:
800		*(int *)result = instance;
801		break;
802	default:
803		break;
804	}
805	return (DDI_SUCCESS);
806}
807
808
809/*
810 * Called by sata module to probe a port.  Port and device state
811 * are not changed here... only reported back to the sata module.
812 *
813 * If probe confirms a device is present for the first time, it will
814 * initiate a device reset, then probe will be called again and the
815 * signature will be checked.  If the signature is valid, data structures
816 * will be initialized.
817 */
818static int
819nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
820{
821	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
822	uint8_t cport = sd->satadev_addr.cport;
823	uint8_t pmport = sd->satadev_addr.pmport;
824	uint8_t qual = sd->satadev_addr.qual;
825	clock_t nv_lbolt = ddi_get_lbolt();
826	nv_port_t *nvp;
827
828	if (cport >= NV_MAX_PORTS(nvc)) {
829		sd->satadev_type = SATA_DTYPE_NONE;
830		sd->satadev_state = SATA_STATE_UNKNOWN;
831
832		return (SATA_FAILURE);
833	}
834
835	ASSERT(nvc->nvc_port != NULL);
836	nvp = &(nvc->nvc_port[cport]);
837	ASSERT(nvp != NULL);
838
839	NVLOG((NVDBG_PROBE, nvc, nvp,
840	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
841	    "qual: 0x%x", cport, pmport, qual));
842
843	mutex_enter(&nvp->nvp_mutex);
844
845	/*
846	 * This check seems to be done in the SATA module.
847	 * It may not be required here
848	 */
849	if (nvp->nvp_state & NV_PORT_INACTIVE) {
850		nv_cmn_err(CE_WARN, nvc, nvp,
851		    "port inactive.  Use cfgadm to activate");
852		sd->satadev_type = SATA_DTYPE_UNKNOWN;
853		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
854		mutex_exit(&nvp->nvp_mutex);
855
856		return (SATA_FAILURE);
857	}
858
859	if (qual == SATA_ADDR_PMPORT) {
860		sd->satadev_type = SATA_DTYPE_NONE;
861		sd->satadev_state = SATA_STATE_UNKNOWN;
862		mutex_exit(&nvp->nvp_mutex);
863		nv_cmn_err(CE_WARN, nvc, nvp,
864		    "controller does not support port multiplier");
865
866		return (SATA_FAILURE);
867	}
868
869	sd->satadev_state = SATA_PSTATE_PWRON;
870
871	nv_copy_registers(nvp, sd, NULL);
872
873	/*
874	 * determine link status
875	 */
876	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
877		uint8_t det;
878
879		/*
880		 * Reset will cause the link to go down for a short period of
881		 * time.  If the link is lost for less than 2 seconds, ignore it
882		 * so that the reset can progress.
883		 */
884		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
885
886			if (nvp->nvp_link_lost_time == 0) {
887				nvp->nvp_link_lost_time = nv_lbolt;
888			}
889
890			if (TICK_TO_SEC(nv_lbolt -
891			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
892				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
893				    "probe: intermittent link lost while"
894				    " resetting"));
895				/*
896				 * fake status of link so that probe continues
897				 */
898				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
899				    SSTATUS_IPM_ACTIVE);
900				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
901				    SSTATUS_DET_DEVPRE_PHYCOM);
902				sd->satadev_type = SATA_DTYPE_UNKNOWN;
903				mutex_exit(&nvp->nvp_mutex);
904
905				return (SATA_SUCCESS);
906			} else {
907				nvp->nvp_state &=
908				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
909			}
910		}
911
912		/*
913		 * no link, so tear down port and abort all active packets
914		 */
915
916		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
917		    SSTATUS_DET_SHIFT;
918
919		switch (det) {
920		case SSTATUS_DET_NODEV:
921		case SSTATUS_DET_PHYOFFLINE:
922			sd->satadev_type = SATA_DTYPE_NONE;
923			break;
924		default:
925			sd->satadev_type = SATA_DTYPE_UNKNOWN;
926			break;
927		}
928
929		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
930		    "probe: link lost invoking nv_abort_active"));
931
932		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
933		nv_uninit_port(nvp);
934
935		mutex_exit(&nvp->nvp_mutex);
936
937		return (SATA_SUCCESS);
938	} else {
939		nvp->nvp_link_lost_time = 0;
940	}
941
942	/*
943	 * A device is present so clear hotremoved flag
944	 */
945	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
946
947	/*
948	 * If the signature was acquired previously there is no need to
949	 * do it again.
950	 */
951	if (nvp->nvp_signature != 0) {
952		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
953		    "probe: signature acquired previously"));
954		sd->satadev_type = nvp->nvp_type;
955		mutex_exit(&nvp->nvp_mutex);
956
957		return (SATA_SUCCESS);
958	}
959
960	/*
961	 * If NV_PORT_RESET is not set, this is the first time through
962	 * so perform reset and return.
963	 */
964	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
965		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
966		    "probe: first reset to get sig"));
967		nvp->nvp_state |= NV_PORT_RESET_PROBE;
968		nv_reset(nvp);
969		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
970		nvp->nvp_probe_time = nv_lbolt;
971		mutex_exit(&nvp->nvp_mutex);
972
973		return (SATA_SUCCESS);
974	}
975
976	/*
977	 * Reset was done previously.  see if the signature is
978	 * available.
979	 */
980	nv_read_signature(nvp);
981	sd->satadev_type = nvp->nvp_type;
982
983	/*
984	 * Some drives may require additional resets to get a
985	 * valid signature.  If a drive was not just powered up, the signature
986	 * should arrive within half a second of reset.  Therefore if more
987	 * than 5 seconds has elapsed while waiting for a signature, reset
988	 * again.  These extra resets do not appear to create problems when
989	 * the drive is spinning up for more than this reset period.
990	 */
991	if (nvp->nvp_signature == 0) {
992		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
993			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
994			    " during signature acquisition"));
995			nv_reset(nvp);
996		}
997
998		mutex_exit(&nvp->nvp_mutex);
999
1000		return (SATA_SUCCESS);
1001	}
1002
1003	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1004	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1005
1006	/*
1007	 * nv_sata only deals with ATA disks so far.  If it is
1008	 * not an ATA disk, then just return.
1009	 */
1010	if (nvp->nvp_type != SATA_DTYPE_ATADISK) {
1011		nv_cmn_err(CE_WARN, nvc, nvp, "Driver currently handles only"
1012		    " disks.  Signature acquired was %X", nvp->nvp_signature);
1013		mutex_exit(&nvp->nvp_mutex);
1014
1015		return (SATA_SUCCESS);
1016	}
1017
1018	/*
1019	 * make sure structures are initialized
1020	 */
1021	if (nv_init_port(nvp) == NV_SUCCESS) {
1022		NVLOG((NVDBG_PROBE, nvc, nvp,
1023		    "device detected and set up at port %d", cport));
1024		mutex_exit(&nvp->nvp_mutex);
1025
1026		return (SATA_SUCCESS);
1027	} else {
1028		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1029		    "structures for port %d", cport);
1030		mutex_exit(&nvp->nvp_mutex);
1031
1032		return (SATA_FAILURE);
1033	}
1034	/*NOTREACHED*/
1035}
1036
1037
1038/*
1039 * Called by sata module to start a new command.
1040 */
1041static int
1042nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1043{
1044	int cport = spkt->satapkt_device.satadev_addr.cport;
1045	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1046	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1047	int ret;
1048
1049	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1050	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1051
1052	mutex_enter(&nvp->nvp_mutex);
1053
1054	/*
1055	 * hotremoved is an intermediate state where the link was lost,
1056	 * but the hotplug event has not yet been processed by the sata
1057	 * module.  Fail the request.
1058	 */
1059	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1060		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1061		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1062		NVLOG((NVDBG_ERRS, nvc, nvp,
1063		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1064		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1065		mutex_exit(&nvp->nvp_mutex);
1066
1067		return (SATA_TRAN_PORT_ERROR);
1068	}
1069
1070	if (nvp->nvp_state & NV_PORT_RESET) {
1071		NVLOG((NVDBG_ERRS, nvc, nvp,
1072		    "still waiting for reset completion"));
1073		spkt->satapkt_reason = SATA_PKT_BUSY;
1074		mutex_exit(&nvp->nvp_mutex);
1075
1076		/*
1077		 * If in panic, timeouts do not occur, so fake one
1078		 * so that the signature can be acquired to complete
1079		 * the reset handling.
1080		 */
1081		if (ddi_in_panic()) {
1082			nv_timeout(nvp);
1083		}
1084
1085		return (SATA_TRAN_BUSY);
1086	}
1087
1088	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1089		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1090		NVLOG((NVDBG_ERRS, nvc, nvp,
1091		    "nv_sata_start: SATA_DTYPE_NONE"));
1092		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1093		mutex_exit(&nvp->nvp_mutex);
1094
1095		return (SATA_TRAN_PORT_ERROR);
1096	}
1097
1098	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_ATAPICD) {
1099		ASSERT(nvp->nvp_type == SATA_DTYPE_ATAPICD);
1100		nv_cmn_err(CE_WARN, nvc, nvp,
1101		    "optical devices not supported");
1102		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1103		mutex_exit(&nvp->nvp_mutex);
1104
1105		return (SATA_TRAN_CMD_UNSUPPORTED);
1106	}
1107
1108	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1109		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1110		nv_cmn_err(CE_WARN, nvc, nvp,
1111		    "port multipliers not supported by controller");
1112		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1113		mutex_exit(&nvp->nvp_mutex);
1114
1115		return (SATA_TRAN_CMD_UNSUPPORTED);
1116	}
1117
1118	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1119		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1120		NVLOG((NVDBG_ERRS, nvc, nvp,
1121		    "nv_sata_start: port not yet initialized"));
1122		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1123		mutex_exit(&nvp->nvp_mutex);
1124
1125		return (SATA_TRAN_PORT_ERROR);
1126	}
1127
1128	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1129		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1130		NVLOG((NVDBG_ERRS, nvc, nvp,
1131		    "nv_sata_start: NV_PORT_INACTIVE"));
1132		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1133		mutex_exit(&nvp->nvp_mutex);
1134
1135		return (SATA_TRAN_PORT_ERROR);
1136	}
1137
1138	if (nvp->nvp_state & NV_PORT_FAILED) {
1139		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1140		NVLOG((NVDBG_ERRS, nvc, nvp,
1141		    "nv_sata_start: NV_PORT_FAILED state"));
1142		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1143		mutex_exit(&nvp->nvp_mutex);
1144
1145		return (SATA_TRAN_PORT_ERROR);
1146	}
1147
1148	/*
1149	 * After a device reset, once the sata module's restore processing
1150	 * is complete, it will set sata_clear_dev_reset, which indicates
1151	 * that restore processing has completed and that normal non-restore
1152	 * related commands should be processed.
1153	 */
1154	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1155		nvp->nvp_state &= ~NV_PORT_RESTORE;
1156		NVLOG((NVDBG_ENTRY, nvc, nvp,
1157		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1158	}
1159
1160	/*
1161	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1162	 * only allow commands which restore device state.  The sata module
1163	 * marks such commands with sata_ignore_dev_reset.
1164	 *
1165	 * during coredump, nv_reset is called but then the restore
1166	 * doesn't happen.  For now, work around this by ignoring the wait
1167	 * for restore if the system is panicking.
1168	 */
1169	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1170	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1171	    (ddi_in_panic() == 0)) {
1172		spkt->satapkt_reason = SATA_PKT_BUSY;
1173		NVLOG((NVDBG_ENTRY, nvc, nvp,
1174		    "nv_sata_start: waiting for restore "));
1175		mutex_exit(&nvp->nvp_mutex);
1176
1177		return (SATA_TRAN_BUSY);
1178	}
1179
1180	if (nvp->nvp_state & NV_PORT_ABORTING) {
1181		spkt->satapkt_reason = SATA_PKT_BUSY;
1182		NVLOG((NVDBG_ERRS, nvc, nvp,
1183		    "nv_sata_start: NV_PORT_ABORTING"));
1184		mutex_exit(&nvp->nvp_mutex);
1185
1186		return (SATA_TRAN_BUSY);
1187	}
1188
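	/*
	 * synchronous and polled commands are dispatched through
	 * nv_start_sync()
	 */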
1189	if (spkt->satapkt_op_mode &
1190	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1191
1192		ret = nv_start_sync(nvp, spkt);
1193
1194		mutex_exit(&nvp->nvp_mutex);
1195
1196		return (ret);
1197	}
1198
1199	/*
1200	 * start the command asynchronously
1201	 */
1202	ret = nv_start_async(nvp, spkt);
1203
1204	mutex_exit(&nvp->nvp_mutex);
1205
1206	return (ret);
1207}
1208
1209
1210/*
1211 * SATA_OPMODE_POLLING implies the driver is in a
1212 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1213 * If only SATA_OPMODE_SYNCH is set, the driver can use
1214 * interrupts and sleep wait on a cv.
1215 *
1216 * If SATA_OPMODE_POLLING is set, the driver can't use
1217 * interrupts and must busy wait and simulate the
1218 * interrupts by waiting for BSY to be cleared.
1219 *
1220 * Synchronous mode has to return BUSY if there are
1221 * any other commands already on the drive.
1222 */
1223static int
1224nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1225{
1226	nv_ctl_t *nvc = nvp->nvp_ctlp;
1227	int ret;
1228
1229	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1230
1231	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1232		spkt->satapkt_reason = SATA_PKT_BUSY;
1233		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1234		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1235		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1236		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1237		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1238
1239		return (SATA_TRAN_BUSY);
1240	}
1241
1242	/*
1243	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1244	 */
1245	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1246	    servicing_interrupt()) {
1247		spkt->satapkt_reason = SATA_PKT_BUSY;
1248		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
1249		    "SYNC mode not allowed during interrupt");
1250
1251		return (SATA_TRAN_BUSY);
1252
1253	}
1254
1255	/*
1256	 * disable interrupt generation if in polled mode
1257	 */
1258	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1259		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1260	}
1261
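	/*
	 * if the command cannot be started, re-enable interrupts (when
	 * they were disabled for polled mode) before returning
	 */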
1262	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1263		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1264			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1265		}
1266
1267		return (ret);
1268	}
1269
1270	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1271		mutex_exit(&nvp->nvp_mutex);
1272		ret = nv_poll_wait(nvp, spkt);
1273		mutex_enter(&nvp->nvp_mutex);
1274
1275		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1276
1277		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1278		    " done ret: %d", ret));
1279
1280		return (ret);
1281	}
1282
1283	/*
1284	 * non-polling synchronous mode handling.  The interrupt will signal
1285	 * when the IO is completed.
1286	 */
1287	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1288
1289	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1290
1291		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1292	}
1293
1294	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1295	    " done reason %d", spkt->satapkt_reason));
1296
1297	return (SATA_TRAN_ACCEPTED);
1298}
1299
1300
1301static int
1302nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1303{
1304	int ret;
1305	nv_ctl_t *nvc = nvp->nvp_ctlp;
1306#if ! defined(__lock_lint)
1307	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1308#endif
1309
1310	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1311
1312	for (;;) {
1313
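		/*
		 * pause briefly before polling for BSY to clear
		 */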
1314		NV_DELAY_NSEC(400);
1315
1316		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1317		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1318		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1319			mutex_enter(&nvp->nvp_mutex);
1320			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1321			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1322			nv_reset(nvp);
1323			nv_complete_io(nvp, spkt, 0);
1324			mutex_exit(&nvp->nvp_mutex);
1325			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1326			    "SATA_STATUS_BSY"));
1327
1328			return (SATA_TRAN_ACCEPTED);
1329		}
1330
1331		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1332
1333		/*
1334		 * Simulate interrupt.
1335		 */
1336		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1337		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1338
1339		if (ret != DDI_INTR_CLAIMED) {
1340			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1341			    " unclaimed -- resetting"));
1342			mutex_enter(&nvp->nvp_mutex);
1343			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1344			nv_reset(nvp);
1345			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1346			nv_complete_io(nvp, spkt, 0);
1347			mutex_exit(&nvp->nvp_mutex);
1348
1349			return (SATA_TRAN_ACCEPTED);
1350		}
1351
1352#if ! defined(__lock_lint)
1353		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1354			/*
1355			 * packet is complete
1356			 */
1357			return (SATA_TRAN_ACCEPTED);
1358		}
1359#endif
1360	}
1361	/*NOTREACHED*/
1362}
1363
1364
1365/*
1366 * Called by sata module to abort outstanding packets.
1367 */
1368/*ARGSUSED*/
1369static int
1370nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1371{
1372	int cport = spkt->satapkt_device.satadev_addr.cport;
1373	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1374	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1375	int c_a, ret;
1376
1377	ASSERT(cport < NV_MAX_PORTS(nvc));
1378	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1379
1380	mutex_enter(&nvp->nvp_mutex);
1381
1382	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1383		mutex_exit(&nvp->nvp_mutex);
1384		nv_cmn_err(CE_WARN, nvc, nvp,
1385		    "abort request failed: port inactive");
1386
1387		return (SATA_FAILURE);
1388	}
1389
1390	/*
1391	 * if spkt == NULL, abort all commands
1392	 */
1393	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1394
1395	if (c_a) {
1396		NVLOG((NVDBG_ENTRY, nvc, nvp,
1397		    "packets aborted running=%d", c_a));
1398		ret = SATA_SUCCESS;
1399	} else {
1400		if (spkt == NULL) {
1401			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1402		} else {
1403			NVLOG((NVDBG_ENTRY, nvc, nvp,
1404			    "can't find spkt to abort"));
1405		}
1406		ret = SATA_FAILURE;
1407	}
1408
1409	mutex_exit(&nvp->nvp_mutex);
1410
1411	return (ret);
1412}
1413
1414
1415/*
1416 * if spkt == NULL abort all pkts running, otherwise
1417 * abort the requested packet.  must be called with nvp_mutex
1418 * held and returns with it held.  Not NCQ aware.
1419 */
1420static int
1421nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1422{
1423	int aborted = 0, i, reset_once = B_FALSE;
1424	struct nv_slot *nv_slotp;
1425	sata_pkt_t *spkt_slot;
1426
1427	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1428
1429	/*
1430	 * return if the port is not configured
1431	 */
1432	if (nvp->nvp_slot == NULL) {
1433		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1434		    "nv_abort_active: not configured so returning"));
1435
1436		return (0);
1437	}
1438
1439	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1440
1441	nvp->nvp_state |= NV_PORT_ABORTING;
1442
1443	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1444
1445		nv_slotp = &(nvp->nvp_slot[i]);
1446		spkt_slot = nv_slotp->nvslot_spkt;
1447
1448		/*
1449		 * skip if not active command in slot
1450		 */
1451		if (spkt_slot == NULL) {
1452			continue;
1453		}
1454
1455		/*
1456		 * if a specific packet was requested, skip if
1457		 * this is not a match
1458		 */
1459		if ((spkt != NULL) && (spkt != spkt_slot)) {
1460			continue;
1461		}
1462
1463		/*
1464		 * stop the hardware.  This could need reworking
1465		 * when NCQ is enabled in the driver.
1466		 */
1467		if (reset_once == B_FALSE) {
1468			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1469
1470			/*
1471			 * stop DMA engine
1472			 */
1473			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1474
1475			nv_reset(nvp);
1476			reset_once = B_TRUE;
1477		}
1478
1479		spkt_slot->satapkt_reason = abort_reason;
1480		nv_complete_io(nvp, spkt_slot, i);
1481		aborted++;
1482	}
1483
1484	nvp->nvp_state &= ~NV_PORT_ABORTING;
1485
1486	return (aborted);
1487}
1488
1489
1490/*
1491 * Called by sata module to reset a port, device, or the controller.
1492 */
1493static int
1494nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1495{
1496	int cport = sd->satadev_addr.cport;
1497	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1498	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1499	int ret = SATA_SUCCESS;
1500
1501	ASSERT(cport < NV_MAX_PORTS(nvc));
1502
1503	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1504
1505	mutex_enter(&nvp->nvp_mutex);
1506
1507	switch (sd->satadev_addr.qual) {
1508
1509	case SATA_ADDR_CPORT:
1510		/*FALLTHROUGH*/
1511	case SATA_ADDR_DCPORT:
1512		nv_reset(nvp);
1513		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1514
1515		break;
1516	case SATA_ADDR_CNTRL:
1517		NVLOG((NVDBG_ENTRY, nvc, nvp,
1518		    "nv_sata_reset: controller reset not supported"));
1519
1520		break;
1521	case SATA_ADDR_PMPORT:
1522	case SATA_ADDR_DPMPORT:
1523		NVLOG((NVDBG_ENTRY, nvc, nvp,
1524		    "nv_sata_reset: port multipliers not supported"));
1525		/*FALLTHROUGH*/
1526	default:
1527		/*
1528		 * unsupported case
1529		 */
1530		ret = SATA_FAILURE;
1531		break;
1532	}
1533
1534	if (ret == SATA_SUCCESS) {
1535		/*
1536		 * If the port is inactive, do a quiet reset and don't attempt
1537		 * to wait for reset completion or do any post reset processing
1538		 */
1539		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1540			nvp->nvp_state &= ~NV_PORT_RESET;
1541			nvp->nvp_reset_time = 0;
1542		}
1543
1544		/*
1545		 * clear the port failed flag
1546		 */
1547		nvp->nvp_state &= ~NV_PORT_FAILED;
1548	}
1549
1550	mutex_exit(&nvp->nvp_mutex);
1551
1552	return (ret);
1553}
1554
1555
1556/*
1557 * Sata entry point to handle port activation.  cfgadm -c connect
1558 */
1559static int
1560nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1561{
1562	int cport = sd->satadev_addr.cport;
1563	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1564	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1565
1566	ASSERT(cport < NV_MAX_PORTS(nvc));
1567	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1568
1569	mutex_enter(&nvp->nvp_mutex);
1570
1571	sd->satadev_state = SATA_STATE_READY;
1572
1573	nv_copy_registers(nvp, sd, NULL);
1574
1575	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1576
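	/*
	 * clear all port state flags, including NV_PORT_INACTIVE
	 */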
1577	nvp->nvp_state = 0;
1578
1579	mutex_exit(&nvp->nvp_mutex);
1580
1581	return (SATA_SUCCESS);
1582}
1583
1584
1585/*
1586 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1587 */
1588static int
1589nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1590{
1591	int cport = sd->satadev_addr.cport;
1592	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1593	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1594
1595	ASSERT(cport < NV_MAX_PORTS(nvc));
1596	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1597
1598	mutex_enter(&nvp->nvp_mutex);
1599
1600	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1601
1602	/*
1603	 * mark the device as inaccessible
1604	 */
1605	nvp->nvp_state |= NV_PORT_INACTIVE;
1606
1607	/*
1608	 * disable the interrupts on port
1609	 */
1610	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1611
1612	nv_uninit_port(nvp);
1613
1614	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1615	nv_copy_registers(nvp, sd, NULL);
1616
1617	mutex_exit(&nvp->nvp_mutex);
1618
1619	return (SATA_SUCCESS);
1620}
1621
1622
1623/*
1624 * find an empty slot in the driver's queue, increment counters,
1625 * and then invoke the appropriate PIO or DMA start routine.
1626 */
1627static int
1628nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1629{
1630	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1631	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1632	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1633	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1634	nv_ctl_t *nvc = nvp->nvp_ctlp;
1635	nv_slot_t *nv_slotp;
1636	boolean_t dma_cmd;
1637
1638	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1639	    sata_cmdp->satacmd_cmd_reg));
1640
1641	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1642	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1643		nvp->nvp_ncq_run++;
1644		/*
1645		 * search for an empty NCQ slot.  by this time, it's already
1646		 * been determined by the caller that there is room on the
1647		 * queue.
1648		 */
1649		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1650		    on_bit <<= 1) {
1651			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1652				break;
1653			}
1654		}
1655
1656		/*
1657		 * the first empty slot found should not exceed the queue
1658		 * depth of the drive.  if it does, it's an error.
1659		 */
1660		ASSERT(slot != nvp->nvp_queue_depth);
1661
1662		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1663		    nvp->nvp_sactive);
1664		ASSERT((sactive & on_bit) == 0);
1665		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1666		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1667		    on_bit));
1668		nvp->nvp_sactive_cache |= on_bit;
1669
1670		ncq = NVSLOT_NCQ;
1671
1672	} else {
1673		nvp->nvp_non_ncq_run++;
1674		slot = 0;
1675	}
1676
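	/*
	 * claim the slot and attach the packet to it
	 */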
1677	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1678
1679	ASSERT(nv_slotp->nvslot_spkt == NULL);
1680
1681	nv_slotp->nvslot_spkt = spkt;
1682	nv_slotp->nvslot_flags = ncq;
1683
1684	/*
1685	 * the sata module doesn't indicate which commands utilize the
1686	 * DMA engine, so find out using this switch table.
1687	 */
1688	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1689	case SATAC_READ_DMA_EXT:
1690	case SATAC_WRITE_DMA_EXT:
1691	case SATAC_WRITE_DMA:
1692	case SATAC_READ_DMA:
1693	case SATAC_READ_DMA_QUEUED:
1694	case SATAC_READ_DMA_QUEUED_EXT:
1695	case SATAC_WRITE_DMA_QUEUED:
1696	case SATAC_WRITE_DMA_QUEUED_EXT:
1697	case SATAC_READ_FPDMA_QUEUED:
1698	case SATAC_WRITE_FPDMA_QUEUED:
1699		dma_cmd = B_TRUE;
1700		break;
1701	default:
1702		dma_cmd = B_FALSE;
1703	}
1704
1705	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1706		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1707		nv_slotp->nvslot_start = nv_start_dma;
1708		nv_slotp->nvslot_intr = nv_intr_dma;
1709	} else if (direction == SATA_DIR_NODATA_XFER) {
1710		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
1711		nv_slotp->nvslot_start = nv_start_nodata;
1712		nv_slotp->nvslot_intr = nv_intr_nodata;
1713	} else if (direction == SATA_DIR_READ) {
1714		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
1715		nv_slotp->nvslot_start = nv_start_pio_in;
1716		nv_slotp->nvslot_intr = nv_intr_pio_in;
1717		nv_slotp->nvslot_byte_count =
1718		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1719		nv_slotp->nvslot_v_addr =
1720		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1721	} else if (direction == SATA_DIR_WRITE) {
1722		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
1723		nv_slotp->nvslot_start = nv_start_pio_out;
1724		nv_slotp->nvslot_intr = nv_intr_pio_out;
1725		nv_slotp->nvslot_byte_count =
1726		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1727		nv_slotp->nvslot_v_addr =
1728		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1729	} else {
1730		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
1731		    " %d cookies %d cmd %x",
1732		    sata_cmdp->satacmd_flags.sata_data_direction,
1733		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
1734		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1735		ret = SATA_TRAN_CMD_UNSUPPORTED;
1736
1737		goto fail;
1738	}
1739
1740	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
1741	    SATA_TRAN_ACCEPTED) {
1742		nv_slotp->nvslot_stime = ddi_get_lbolt();
1743
1744		/*
1745		 * start timer if it's not already running and this packet
1746		 * is not requesting polled mode.
1747		 */
1748		if ((nvp->nvp_timeout_id == 0) &&
1749		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
1750			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1751			    drv_usectohz(NV_ONE_SEC));
1752		}
1753
1754		return (SATA_TRAN_ACCEPTED);
1755	}
1756
1757	fail:
1758
1759	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
1760
1761	if (ncq == NVSLOT_NCQ) {
1762		nvp->nvp_ncq_run--;
1763		nvp->nvp_sactive_cache &= ~on_bit;
1764	} else {
1765		nvp->nvp_non_ncq_run--;
1766	}
1767	nv_slotp->nvslot_spkt = NULL;
1768	nv_slotp->nvslot_flags = 0;
1769
1770	return (ret);
1771}
1772
1773
1774/*
1775 * Check if the signature is ready and, if non-zero, translate
1776 * it into a Solaris SATA-defined type.
1777 */
1778static void
1779nv_read_signature(nv_port_t *nvp)
1780{
1781	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1782
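	/*
	 * assemble the 32-bit signature from the sector count, sector
	 * number, cylinder low and cylinder high task file registers
	 */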
1783	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
1784	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
1785	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
1786	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
1787
1788	switch (nvp->nvp_signature) {
1789
1790	case NV_SIG_DISK:
1791		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
1792		nvp->nvp_type = SATA_DTYPE_ATADISK;
1793		break;
1794	case NV_SIG_ATAPI:
1795		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1796		    "drive is an optical device"));
1797		nvp->nvp_type = SATA_DTYPE_ATAPICD;
1798		break;
1799	case NV_SIG_PM:
1800		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1801		    "device is a port multiplier"));
1802		nvp->nvp_type = SATA_DTYPE_PMULT;
1803		break;
1804	case NV_SIG_NOTREADY:
1805		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1806		    "signature not ready"));
1807		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1808		break;
1809	default:
1810		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
1811		    " recognized", nvp->nvp_signature);
1812		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1813		break;
1814	}
1815
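	/*
	 * a non-zero signature means the reset/probe sequence is complete
	 */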
1816	if (nvp->nvp_signature) {
1817		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1818	}
1819}
1820
1821
1822/*
1823 * Reset the port
1824 */
1825static void
1826nv_reset(nv_port_t *nvp)
1827{
1828	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1829	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1830	nv_ctl_t *nvc = nvp->nvp_ctlp;
1831	uint32_t sctrl;
1832
1833	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
1834
1835	ASSERT(mutex_owned(&nvp->nvp_mutex));
1836
1837	/*
1838	 * clear signature registers
1839	 */
1840	nv_put8(cmdhdl, nvp->nvp_sect, 0);
1841	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
1842	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
1843	nv_put8(cmdhdl, nvp->nvp_count, 0);
1844
1845	nvp->nvp_signature = 0;
1846	nvp->nvp_type = 0;
1847	nvp->nvp_state |= NV_PORT_RESET;
1848	nvp->nvp_reset_time = ddi_get_lbolt();
1849	nvp->nvp_link_lost_time = 0;
1850
1851	/*
1852	 * assert reset in PHY by writing a 1 to bit 0 scontrol
1853	 */
1854	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
1855
1856	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
1857
1858	/*
1859	 * wait 1ms
1860	 */
1861	drv_usecwait(1000);
1862
1863	/*
1864	 * de-assert reset in PHY
1865	 */
1866	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
1867
1868	/*
1869	 * make sure timer is running
1870	 */
1871	if (nvp->nvp_timeout_id == 0) {
1872		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1873		    drv_usectohz(NV_ONE_SEC));
1874	}
1875}
1876
1877
1878/*
1879 * Initialize register handling specific to mcp55
1880 */
1881/* ARGSUSED */
1882static void
1883mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1884{
1885	nv_port_t *nvp;
1886	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1887	uint8_t off, port;
1888
1889	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
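	/*
	 * locate the mcp55 control and NCQ registers within bar5
	 */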
1890	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
1891
1892	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
1893		nvp = &(nvc->nvc_port[port]);
1894		nvp->nvp_mcp55_int_status =
1895		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
1896		nvp->nvp_mcp55_int_ctl =
1897		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
1898
1899		/*
1900		 * clear any previous interrupts asserted
1901		 */
1902		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
1903		    MCP55_INT_CLEAR);
1904
1905		/*
1906		 * These are the interrupts to accept for now.  The spec
1907		 * says these are enable bits, but nvidia has indicated
1908		 * these are masking bits.  Even though they may be masked
1909		 * out to prevent asserting the main interrupt, they can
1910		 * still be asserted while reading the interrupt status
1911		 * register, so that needs to be considered in the interrupt
1912		 * handler.
1913		 */
1914		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
1915		    ~(MCP55_INT_IGNORE));
1916	}
1917
1918	/*
1919	 * Allow the driver to program the BM on the first command instead
1920	 * of waiting for an interrupt.
1921	 */
1922#ifdef NCQ
1923	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
1924	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
1925	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
1926	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
1927#endif
1928
1929
1930#if 0
1931	/*
1932	 * This caused problems on some but not all mcp55 based systems.
1933	 * DMA writes would never complete.  This happens even on small
1934	 * memory systems, and even when only NV_40BIT_PRD below is set
1935	 * (and not buffer_dma_attr.dma_attr_addr_hi), so it seems to be a
1936	 * hardware issue that needs further investigation.
1937	 */
1938
1939	/*
1940	 * mcp55 rev A03 and above supports 40-bit physical addressing.
1941	 * Enable DMA to take advantage of that.
1942	 *
1943	 */
1944	if (nvc->nvc_revid >= 0xa3) {
1945		uint32_t reg32;
1946		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
1947		    " is capable of 40-bit addressing", nvc->nvc_revid));
1948		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
1949		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
1950		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
1951		    reg32 |NV_40BIT_PRD);
1952	} else {
1953		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
1954		    "not capable of 40-bit addressing", nvc->nvc_revid));
1955	}
1956#endif
1957
1958}
1959
1960
1961/*
1962 * Initialize register handling specific to mcp04
1963 */
1964static void
1965mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1966{
1967	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1968	uint32_t reg32;
1969	uint16_t reg16;
1970	nv_port_t *nvp;
1971	int j;
1972
1973	/*
1974	 * delay hotplug interrupts until PHYRDY.
1975	 */
1976	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
1977	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
1978	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
1979
1980	/*
1981	 * enable hot plug interrupts for channel x and y
1982	 */
1983	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1984	    (uint16_t *)(bar5 + NV_ADMACTL_X));
1985	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
1986	    NV_HIRQ_EN | reg16);
1987
1988
1989	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1990	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
1991	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
1992	    NV_HIRQ_EN | reg16);
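
	/*
	 * cache the address of the interrupt status register, which is
	 * shared by both ports on ck804
	 */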
1993
1994	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
1995
1996	/*
1997	 * clear any existing interrupt pending then enable
1998	 */
1999	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2000		nvp = &(nvc->nvc_port[j]);
2001		mutex_enter(&nvp->nvp_mutex);
2002		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2003		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2004		mutex_exit(&nvp->nvp_mutex);
2005	}
2006}
2007
2008
2009/*
2010 * Initialize the controller and set up driver data structures.
 * Determine whether the controller is ck804 or mcp55 class.
2012 */
2013static int
2014nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2015{
2016	struct sata_hba_tran stran;
2017	nv_port_t *nvp;
2018	int j, ck804 = B_TRUE;
2019	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2020	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2021	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2022	uint32_t reg32;
2023	uint8_t reg8, reg8_save;
2024
2025	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2026
2027	/*
2028	 * Need to set bit 2 to 1 at config offset 0x50
2029	 * to enable access to the bar5 registers.
2030	 */
2031	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2032	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2033	    reg32 | NV_BAR5_SPACE_EN);
2034
2035	/*
	 * Determine whether this is ck804 or mcp55.  ck804 maps the task
	 * file registers into bar5 while mcp55 does not; that offset is
	 * unused in mcp55's register space.  So write to one of the task
	 * file registers and check whether it reads back what was written:
	 * mcp55 will read back 0xff, whereas ck804 will return the value
	 * written.
2042	 */
2043	reg8_save = nv_get8(bar5_hdl,
2044	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2045
2046
2047	for (j = 1; j < 3; j++) {
2048
2049		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2050		reg8 = nv_get8(bar5_hdl,
2051		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2052
2053		if (reg8 != j) {
2054			ck804 = B_FALSE;
2055			break;
2056		}
2057	}
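
	/*
	 * restore the original value of the probed task file register
	 */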
2058
2059	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2060
2061	if (ck804 == B_TRUE) {
2062		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2063		nvc->nvc_interrupt = mcp04_intr;
2064		nvc->nvc_reg_init = mcp04_reg_init;
2065		nvc->nvc_set_intr = mcp04_set_intr;
2066	} else {
2067		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2068		nvc->nvc_interrupt = mcp55_intr;
2069		nvc->nvc_reg_init = mcp55_reg_init;
2070		nvc->nvc_set_intr = mcp55_set_intr;
2071	}
2072
2073
2074	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2075	stran.sata_tran_hba_dip = nvc->nvc_dip;
2076	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2077	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2078	stran.sata_tran_hba_features_support =
2079	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN;
2080	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2081	stran.sata_tran_probe_port = nv_sata_probe;
2082	stran.sata_tran_start = nv_sata_start;
2083	stran.sata_tran_abort = nv_sata_abort;
2084	stran.sata_tran_reset_dport = nv_sata_reset;
2085	stran.sata_tran_selftest = NULL;
2086	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2087	stran.sata_tran_pwrmgt_ops = NULL;
2088	stran.sata_tran_ioctl = NULL;
2089	nvc->nvc_sata_hba_tran = stran;
2090
2091	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2092	    KM_SLEEP);
2093
2094	/*
2095	 * initialize registers common to all chipsets
2096	 */
2097	nv_common_reg_init(nvc);
2098
2099	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2100		nvp = &(nvc->nvc_port[j]);
2101
2102		cmd_addr = nvp->nvp_cmd_addr;
2103		ctl_addr = nvp->nvp_ctl_addr;
2104		bm_addr = nvp->nvp_bm_addr;
2105
2106		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2107		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2108
2109		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2110
2111		nvp->nvp_data	= cmd_addr + NV_DATA;
2112		nvp->nvp_error	= cmd_addr + NV_ERROR;
2113		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2114		nvp->nvp_count	= cmd_addr + NV_COUNT;
2115		nvp->nvp_sect	= cmd_addr + NV_SECT;
2116		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2117		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2118		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2119		nvp->nvp_status	= cmd_addr + NV_STATUS;
2120		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2121		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2122		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2123
2124		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2125		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2126		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2127
2128		nvp->nvp_state = 0;
2129	}
2130
2131	/*
2132	 * initialize register by calling chip specific reg initialization
2133	 */
2134	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2135
2136	return (NV_SUCCESS);
2137}
2138
2139
2140/*
2141 * Initialize data structures with enough slots to handle queuing, if
2142 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2143 * NCQ support is built into the driver and enabled.  It might have been
2144 * better to derive the true size from the drive itself, but the sata
2145 * module only sends down that information on the first NCQ command,
2146 * which means possibly re-sizing the structures on an interrupt stack,
2147 * making error handling more messy.  The easy way is to just allocate
2148 * all 32 slots, which is what most drives support anyway.
2149 */
2150static int
2151nv_init_port(nv_port_t *nvp)
2152{
2153	nv_ctl_t *nvc = nvp->nvp_ctlp;
2154	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2155	dev_info_t *dip = nvc->nvc_dip;
2156	ddi_device_acc_attr_t dev_attr;
2157	size_t buf_size;
2158	ddi_dma_cookie_t cookie;
2159	uint_t count;
2160	int rc, i;
2161
2162	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2163	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2164	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2165
2166	if (nvp->nvp_state & NV_PORT_INIT) {
2167		NVLOG((NVDBG_INIT, nvc, nvp,
2168		    "nv_init_port previously initialized"));
2169
2170		return (NV_SUCCESS);
2171	} else {
2172		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2173	}
2174
2175	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2176	    NV_QUEUE_SLOTS, KM_SLEEP);
2177
2178	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2179	    NV_QUEUE_SLOTS, KM_SLEEP);
2180
2181	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2182	    NV_QUEUE_SLOTS, KM_SLEEP);
2183
2184	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2185	    NV_QUEUE_SLOTS, KM_SLEEP);
2186
2187	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2188	    KM_SLEEP);
2189
2190	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2191
2192		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2193		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2194
2195		if (rc != DDI_SUCCESS) {
2196			nv_uninit_port(nvp);
2197
2198			return (NV_FAILURE);
2199		}
2200
2201		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2202		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2203		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2204		    &(nvp->nvp_sg_acc_hdl[i]));
2205
2206		if (rc != DDI_SUCCESS) {
2207			nv_uninit_port(nvp);
2208
2209			return (NV_FAILURE);
2210		}
2211
2212		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2213		    nvp->nvp_sg_addr[i], buf_size,
2214		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2215		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2216
2217		if (rc != DDI_DMA_MAPPED) {
2218			nv_uninit_port(nvp);
2219
2220			return (NV_FAILURE);
2221		}
2222
2223		ASSERT(count == 1);
2224		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
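
		/*
		 * the PRD table must reside in the low 32 bits of physical
		 * memory because the bus master PRD pointer register is
		 * only 32 bits wide
		 */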
2225
2226		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2227
2228		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2229	}
2230
2231	/*
2232	 * nvp_queue_depth represents the actual drive queue depth, not the
2233	 * number of slots allocated in the structures (which may be more).
2234	 * Actual queue depth is only learned after the first NCQ command, so
2235	 * initialize it to 1 for now.
2236	 */
2237	nvp->nvp_queue_depth = 1;
2238
2239	nvp->nvp_state |= NV_PORT_INIT;
2240
2241	return (NV_SUCCESS);
2242}
2243
2244
2245/*
2246 * Free dynamically allocated structures for port.
2247 */
2248static void
2249nv_uninit_port(nv_port_t *nvp)
2250{
2251	int i;
2252
2253	/*
2254	 * It is possible to reach here before a port has been initialized or
2255	 * after it has already been uninitialized.  Just return in that case.
2256	 */
2257	if (nvp->nvp_slot == NULL) {
2258
2259		return;
2260	}
2261
2262	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2263	    "nv_uninit_port uninitializing"));
2264
2265	nvp->nvp_type = SATA_DTYPE_NONE;
2266
2267	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2268		if (nvp->nvp_sg_paddr[i]) {
2269			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2270		}
2271
2272		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2273			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2274		}
2275
2276		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2277			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2278		}
2279	}
2280
2281	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2282	nvp->nvp_slot = NULL;
2283
2284	kmem_free(nvp->nvp_sg_dma_hdl,
2285	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2286	nvp->nvp_sg_dma_hdl = NULL;
2287
2288	kmem_free(nvp->nvp_sg_acc_hdl,
2289	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2290	nvp->nvp_sg_acc_hdl = NULL;
2291
2292	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2293	nvp->nvp_sg_addr = NULL;
2294
2295	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2296	nvp->nvp_sg_paddr = NULL;
2297
2298	nvp->nvp_state &= ~NV_PORT_INIT;
2299	nvp->nvp_signature = 0;
2300}
2301
2302
2303/*
2304 * Cache register offsets and access handles to frequently accessed registers
2305 * which are common to either chipset.
2306 */
2307static void
2308nv_common_reg_init(nv_ctl_t *nvc)
2309{
2310	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2311	uchar_t *bm_addr_offset, *sreg_offset;
2312	uint8_t bar, port;
2313	nv_port_t *nvp;
2314
2315	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2316		if (port == 0) {
2317			bar = NV_BAR_0;
2318			bm_addr_offset = 0;
2319			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2320		} else {
2321			bar = NV_BAR_2;
2322			bm_addr_offset = (uchar_t *)8;
2323			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2324		}
2325
2326		nvp = &(nvc->nvc_port[port]);
2327		nvp->nvp_ctlp = nvc;
2328		nvp->nvp_port_num = port;
2329		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
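
		/*
		 * The task file (command) registers come from the bar
		 * selected above and the device control registers from the
		 * following bar; both ports share bar 4 for the bus master
		 * registers.  The SATA status registers live in bar 5 at
		 * per-channel offsets.
		 */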
2330
2331		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2332		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2333		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2334		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2335		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2336		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2337		    (long)bm_addr_offset;
2338
2339		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2340		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2341		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2342		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2343	}
2344}
2345
2346
2347static void
2348nv_uninit_ctl(nv_ctl_t *nvc)
2349{
2350	int port;
2351	nv_port_t *nvp;
2352
2353	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2354
2355	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2356		nvp = &(nvc->nvc_port[port]);
2357		mutex_enter(&nvp->nvp_mutex);
2358		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2359		nv_uninit_port(nvp);
2360		mutex_exit(&nvp->nvp_mutex);
2361		mutex_destroy(&nvp->nvp_mutex);
2362		cv_destroy(&nvp->nvp_poll_cv);
2363	}
2364
2365	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2366	nvc->nvc_port = NULL;
2367}
2368
2369
2370/*
2371 * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2372 * that interrupts from other devices can be disregarded while dtracing.
2373 */
2374/* ARGSUSED */
2375static uint_t
2376mcp04_intr(caddr_t arg1, caddr_t arg2)
2377{
2378	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2379	uint8_t intr_status;
2380	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2381
2382	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2383
2384	if (intr_status == 0) {
2385
2386		return (DDI_INTR_UNCLAIMED);
2387	}
2388
2389	mcp04_intr_process(nvc, intr_status);
2390
2391	return (DDI_INTR_CLAIMED);
2392}
2393
2394
2395/*
2396 * Main interrupt handler for ck804.  handles normal device
2397 * interrupts as well as port hot plug and remove interrupts.
2398 *
2399 */
2400static void
2401mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2402{
2403
2404	int port, i;
2405	nv_port_t *nvp;
2406	nv_slot_t *nv_slotp;
2407	uchar_t	status;
2408	sata_pkt_t *spkt;
2409	uint8_t bmstatus, clear_bits;
2410	ddi_acc_handle_t bmhdl;
2411	int nvcleared = 0;
2412	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2413	uint32_t sstatus;
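	/*
	 * hot plug and power management interrupt status bits for the
	 * primary and secondary ports, indexed by port number
	 */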
2414	int port_mask_hot[] = {
2415		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2416	};
2417	int port_mask_pm[] = {
2418		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2419	};
2420
2421	NVLOG((NVDBG_INTR, nvc, NULL,
2422	    "mcp04_intr_process entered intr_status=%x", intr_status));
2423
2424	/*
	 * For a command completion interrupt, an explicit clear is not
	 * required; however, for the error cases an explicit clear is
	 * performed.
2427	 */
2428	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2429
2430		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2431
2432		if ((port_mask[port] & intr_status) == 0) {
2433			continue;
2434		}
2435
2436		NVLOG((NVDBG_INTR, nvc, NULL,
2437		    "mcp04_intr_process interrupt on port %d", port));
2438
2439		nvp = &(nvc->nvc_port[port]);
2440
2441		mutex_enter(&nvp->nvp_mutex);
2442
2443		/*
2444		 * there was a corner case found where an interrupt
		 * arrived before nvp_slot was set.  Should probably track
		 * down why that happens and try to eliminate that source,
		 * and then get rid of this check.
2449		 */
2450		if (nvp->nvp_slot == NULL) {
2451			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2452			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2453			    "received before initialization "
2454			    "completed status=%x", status));
2455			mutex_exit(&nvp->nvp_mutex);
2456
2457			/*
2458			 * clear interrupt bits
2459			 */
2460			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2461			    port_mask[port]);
2462
2463			continue;
2464		}
2465
2466		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2467			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2468			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    "no command in progress status=%x", status));
2470			mutex_exit(&nvp->nvp_mutex);
2471
2472			/*
2473			 * clear interrupt bits
2474			 */
2475			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2476			    port_mask[port]);
2477
2478			continue;
2479		}
2480
2481		bmhdl = nvp->nvp_bm_hdl;
2482		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
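
		/*
		 * if the bus master does not show an interrupt pending for
		 * this port, there is no command completion to process here
		 */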
2483
2484		if (!(bmstatus & BMISX_IDEINTS)) {
2485			mutex_exit(&nvp->nvp_mutex);
2486
2487			continue;
2488		}
2489
2490		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2491
2492		if (status & SATA_STATUS_BSY) {
2493			mutex_exit(&nvp->nvp_mutex);
2494
2495			continue;
2496		}
2497
2498		nv_slotp = &(nvp->nvp_slot[0]);
2499
2500		ASSERT(nv_slotp);
2501
2502		spkt = nv_slotp->nvslot_spkt;
2503
2504		if (spkt == NULL) {
2505			mutex_exit(&nvp->nvp_mutex);
2506
2507			continue;
2508		}
2509
2510		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2511
2512		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2513
2514		/*
		 * If there is no link, we cannot be certain about the
		 * completion of the packet, so abort it.
2517		 */
2518		if (nv_check_link((&spkt->satapkt_device)->
2519		    satadev_scr.sstatus) == B_FALSE) {
2520
2521			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2522
2523		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2524
2525			nv_complete_io(nvp, spkt, 0);
2526		}
2527
2528		mutex_exit(&nvp->nvp_mutex);
2529	}
2530
2531	/*
2532	 * mcp04 often doesn't correctly distinguish hot add/remove
2533	 * interrupts.  Frequently both the ADD and the REMOVE bits
2534	 * are asserted, whether it was a remove or add.  Use sstatus
2535	 * to distinguish hot add from hot remove.
2536	 */
2537
2538	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2539		clear_bits = 0;
2540
2541		nvp = &(nvc->nvc_port[port]);
2542		mutex_enter(&nvp->nvp_mutex);
2543
2544		if ((port_mask_pm[port] & intr_status) != 0) {
2545			clear_bits = port_mask_pm[port];
2546			NVLOG((NVDBG_HOT, nvc, nvp,
2547			    "clearing PM interrupt bit: %x",
2548			    intr_status & port_mask_pm[port]));
2549		}
2550
2551		if ((port_mask_hot[port] & intr_status) == 0) {
2552			if (clear_bits != 0) {
2553				goto clear;
2554			} else {
2555				mutex_exit(&nvp->nvp_mutex);
2556				continue;
2557			}
2558		}
2559
2560		/*
2561		 * reaching here means there was a hot add or remove.
2562		 */
2563		clear_bits |= port_mask_hot[port];
2564
2565		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2566
2567		sstatus = nv_get32(bar5_hdl,
2568		    nvc->nvc_port[port].nvp_sstatus);
2569
2570		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2571		    SSTATUS_DET_DEVPRE_PHYCOM) {
2572			nv_report_add_remove(nvp, 0);
2573		} else {
2574			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2575		}
2576	clear:
2577		/*
2578		 * clear interrupt bits.  explicit interrupt clear is
2579		 * required for hotplug interrupts.
2580		 */
2581		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2582
2583		/*
2584		 * make sure it's flushed and cleared.  If not try
2585		 * again.  Sometimes it has been observed to not clear
2586		 * on the first try.
2587		 */
2588		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2589
2590		/*
2591		 * make 10 additional attempts to clear the interrupt
2592		 */
2593		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2594			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2595			    "still not clear try=%d", intr_status,
2596			    ++nvcleared));
2597			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2598			    clear_bits);
2599			intr_status = nv_get8(bar5_hdl,
2600			    nvc->nvc_mcp04_int_status);
2601		}
2602
2603		/*
2604		 * if still not clear, log a message and disable the
2605		 * port. highly unlikely that this path is taken, but it
2606		 * gives protection against a wedged interrupt.
2607		 */
2608		if (intr_status & clear_bits) {
2609			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2610			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2611			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2612			nvp->nvp_state |= NV_PORT_FAILED;
2613			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2614			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2615			    "interrupt.  disabling port intr_status=%X",
2616			    intr_status);
2617		}
2618
2619		mutex_exit(&nvp->nvp_mutex);
2620	}
2621}
2622
2623
2624/*
2625 * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2626 * on the controller, to handle completion and hot plug and remove events.
2627 *
2628 */
2629static uint_t
2630mcp55_intr_port(nv_port_t *nvp)
2631{
2632	nv_ctl_t *nvc = nvp->nvp_ctlp;
2633	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2634	uint8_t clear = 0, intr_cycles = 0;
2635	int ret = DDI_INTR_UNCLAIMED;
2636	uint16_t int_status;
2637
2638	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2639
2640	for (;;) {
2641		/*
2642		 * read current interrupt status
2643		 */
2644		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2645
2646		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2647
2648		/*
2649		 * MCP55_INT_IGNORE interrupts will show up in the status,
2650		 * but are masked out from causing an interrupt to be generated
2651		 * to the processor.  Ignore them here by masking them out.
2652		 */
2653		int_status &= ~(MCP55_INT_IGNORE);
2654
2655		/*
2656		 * exit the loop when no more interrupts to process
2657		 */
2658		if (int_status == 0) {
2659
2660			break;
2661		}
2662
2663		if (int_status & MCP55_INT_COMPLETE) {
2664			NVLOG((NVDBG_INTR, nvc, nvp,
2665			    "mcp55_packet_complete_intr"));
2666			/*
2667			 * since int_status was set, return DDI_INTR_CLAIMED
2668			 * from the DDI's perspective even though the packet
2669			 * completion may not have succeeded.  If it fails,
2670			 * need to manually clear the interrupt, otherwise
2671			 * clearing is implicit.
2672			 */
2673			ret = DDI_INTR_CLAIMED;
2674			if (mcp55_packet_complete_intr(nvc, nvp) ==
2675			    NV_FAILURE) {
2676				clear = MCP55_INT_COMPLETE;
2677			} else {
2678				intr_cycles = 0;
2679			}
2680		}
2681
2682		if (int_status & MCP55_INT_DMA_SETUP) {
2683			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
2684
2685			/*
2686			 * Needs to be cleared before starting the BM, so do it
2687			 * now.  make sure this is still working.
2688			 */
2689			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
2690			    MCP55_INT_DMA_SETUP);
2691#ifdef NCQ
2692			ret = mcp55_dma_setup_intr(nvc, nvp);
2693#endif
2694		}
2695
2696		if (int_status & MCP55_INT_REM) {
2697			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
2698			clear = MCP55_INT_REM;
2699			ret = DDI_INTR_CLAIMED;
2700
2701			mutex_enter(&nvp->nvp_mutex);
2702			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2703			mutex_exit(&nvp->nvp_mutex);
2704
2705		} else if (int_status & MCP55_INT_ADD) {
2706			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
2707			clear = MCP55_INT_ADD;
2708			ret = DDI_INTR_CLAIMED;
2709
2710			mutex_enter(&nvp->nvp_mutex);
2711			nv_report_add_remove(nvp, 0);
2712			mutex_exit(&nvp->nvp_mutex);
2713		}
2714
2715		if (clear) {
2716			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
2717			clear = 0;
2718		}
2719
2720		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
2721			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
2722			    "processing.  Disabling port int_status=%X"
2723			    " clear=%X", int_status, clear);
2724			mutex_enter(&nvp->nvp_mutex);
2725			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2726			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2727			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2728			nvp->nvp_state |= NV_PORT_FAILED;
2729			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2730			mutex_exit(&nvp->nvp_mutex);
2731		}
2732	}
2733
2734	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
2735
2736	return (ret);
2737}
2738
2739
2740/* ARGSUSED */
2741static uint_t
2742mcp55_intr(caddr_t arg1, caddr_t arg2)
2743{
2744	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2745	int ret;
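
	/*
	 * check both ports; the interrupt is claimed if either port
	 * handler claimed it
	 */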
2746
2747	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
2748	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
2749
2750	return (ret);
2751}
2752
2753
2754#ifdef NCQ
2755/*
2756 * with software driven NCQ on mcp55, an interrupt occurs right
2757 * before the drive is ready to do a DMA transfer.  At this point,
2758 * the PRD table needs to be programmed and the DMA engine enabled
2759 * and ready to go.
2760 *
2761 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
2762 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
2763 * -- clear bit 0 of master command reg
2764 * -- program PRD
2765 * -- clear the interrupt status bit for the DMA Setup FIS
2766 * -- set bit 0 of the bus master command register
2767 */
2768static int
2769mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2770{
2771	int slot;
2772	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2773	uint8_t bmicx;
2774	int port = nvp->nvp_port_num;
2775	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
2776	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
2777
2778	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ is enabled");
2780
2781	mutex_enter(&nvp->nvp_mutex);
2782
2783	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
2784
2785	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
2786
2787	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
2788	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
2789
2790	/*
2791	 * halt the DMA engine.  This step is necessary according to
2792	 * the mcp55 spec, probably since there may have been a "first" packet
2793	 * that already programmed the DMA engine, but may not turn out to
2794	 * be the first one processed.
2795	 */
2796	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
2797
2798#if 0
2799	if (bmicx & BMICX_SSBM) {
2800		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
2801		    "another packet.  Cancelling and reprogramming"));
2802		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2803	}
2804#endif
2805	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2806
2807	nv_start_dma_engine(nvp, slot);
2808
2809	mutex_exit(&nvp->nvp_mutex);
2810
2811	return (DDI_INTR_CLAIMED);
2812}
2813#endif /* NCQ */
2814
2815
2816/*
2817 * packet completion interrupt.  If the packet is complete, invoke
2818 * the packet completion callback.
2819 */
2820static int
2821mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2822{
2823	uint8_t status, bmstatus;
2824	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2825	int sactive;
2826	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
2827	sata_pkt_t *spkt;
2828	nv_slot_t *nv_slotp;
2829
2830	mutex_enter(&nvp->nvp_mutex);
2831
2832	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2833
2834	if (!(bmstatus & BMISX_IDEINTS)) {
2835		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
2836		mutex_exit(&nvp->nvp_mutex);
2837
2838		return (NV_FAILURE);
2839	}
2840
2841	/*
2842	 * If the just completed item is a non-ncq command, the busy
2843	 * bit should not be set
2844	 */
2845	if (nvp->nvp_non_ncq_run) {
2846		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2847		if (status & SATA_STATUS_BSY) {
2848			nv_cmn_err(CE_WARN, nvc, nvp,
2849			    "unexpected SATA_STATUS_BSY set");
2850			mutex_exit(&nvp->nvp_mutex);
2851			/*
2852			 * calling function will clear interrupt.  then
2853			 * the real interrupt will either arrive or the
2854			 * packet timeout handling will take over and
2855			 * reset.
2856			 */
2857			return (NV_FAILURE);
2858		}
2859
2860	} else {
2861		/*
		 * NCQ: check for BSY here and wait if still busy before
		 * continuing.  Rather than waiting for it to clear when
		 * starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but it might have to spin
		 * here for a while.  Needs more work and experimentation.
2867		 */
2868		ASSERT(nvp->nvp_ncq_run);
2869	}
2870
2871
2872	if (nvp->nvp_ncq_run) {
2873		ncq_command = B_TRUE;
2874		ASSERT(nvp->nvp_non_ncq_run == 0);
2875	} else {
2876		ASSERT(nvp->nvp_non_ncq_run != 0);
2877	}
2878
2879	/*
2880	 * active_pkt_bit will represent the bitmap of the single completed
2881	 * packet.  Because of the nature of sw assisted NCQ, only one
2882	 * command will complete per interrupt.
2883	 */
2884
2885	if (ncq_command == B_FALSE) {
2886		active_pkt = 0;
2887	} else {
2888		/*
2889		 * NCQ: determine which command just completed, by examining
2890		 * which bit cleared in the register since last written.
2891		 */
2892		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
2893
2894		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
2895
2896		ASSERT(active_pkt_bit);
2897
2898
2899		/*
2900		 * this failure path needs more work to handle the
2901		 * error condition and recovery.
2902		 */
2903		if (active_pkt_bit == 0) {
2904			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2905
2906			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
2907			    "nvp->nvp_sactive %X", sactive,
2908			    nvp->nvp_sactive_cache);
2909
2910			(void) nv_get8(cmdhdl, nvp->nvp_status);
2911
2912			mutex_exit(&nvp->nvp_mutex);
2913
2914			return (NV_FAILURE);
2915		}
2916
2917		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
2918		    active_pkt++, active_pkt_bit >>= 1) {
2919		}
2920
2921		/*
2922		 * make sure only one bit is ever turned on
2923		 */
2924		ASSERT(active_pkt_bit == 1);
2925
2926		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
2927	}
2928
2929	nv_slotp = &(nvp->nvp_slot[active_pkt]);
2930
2931	spkt = nv_slotp->nvslot_spkt;
2932
2933	ASSERT(spkt != NULL);
2934
2935	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2936
2937	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2938
2939	/*
	 * If there is no link, we cannot be certain about the completion
	 * of the packet, so abort it.
2942	 */
2943	if (nv_check_link((&spkt->satapkt_device)->
2944	    satadev_scr.sstatus) == B_FALSE) {
2945		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2946
2947	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2948
2949		nv_complete_io(nvp, spkt, active_pkt);
2950	}
2951
2952	mutex_exit(&nvp->nvp_mutex);
2953
2954	return (NV_SUCCESS);
2955}
2956
2957
2958static void
2959nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
2960{
2961
2962	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
2963
2964	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
2965		nvp->nvp_ncq_run--;
2966	} else {
2967		nvp->nvp_non_ncq_run--;
2968	}
2969
2970	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so that the slot is available for reuse by
	 * the time the completion callback runs.
2973	 */
2974	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
2975
2976	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
2977		/*
		 * If this is not a timed polled mode command, which has an
		 * active thread monitoring for completion, then signal the
		 * sleeping thread that the command is complete.
2981		 */
2982		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
2983			cv_signal(&nvp->nvp_poll_cv);
2984		}
2985
2986		return;
2987	}
2988
2989	if (spkt->satapkt_comp != NULL) {
2990		mutex_exit(&nvp->nvp_mutex);
2991		(*spkt->satapkt_comp)(spkt);
2992		mutex_enter(&nvp->nvp_mutex);
2993	}
2994}
2995
2996
2997/*
2998 * check whether packet is ncq command or not.  for ncq command,
2999 * start it if there is still room on queue.  for non-ncq command only
3000 * start if no other command is running.
3001 */
3002static int
3003nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3004{
3005	uint8_t cmd, ncq;
3006
3007	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3008
3009	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3010
3011	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3012	    (cmd == SATAC_READ_FPDMA_QUEUED));
3013
3014	if (ncq == B_FALSE) {
3015
3016		if ((nvp->nvp_non_ncq_run == 1) ||
3017		    (nvp->nvp_ncq_run > 0)) {
3018			/*
3019			 * next command is non-ncq which can't run
3020			 * concurrently.  exit and return queue full.
3021			 */
3022			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3023
3024			return (SATA_TRAN_QUEUE_FULL);
3025		}
3026
3027		return (nv_start_common(nvp, spkt));
3028	}
3029
3030	/*
3031	 * ncq == B_TRUE
3032	 */
3033	if (nvp->nvp_non_ncq_run == 1) {
3034		/*
3035		 * cannot start any NCQ commands when there
3036		 * is a non-NCQ command running.
3037		 */
3038		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3039
3040		return (SATA_TRAN_QUEUE_FULL);
3041	}
3042
3043#ifdef NCQ
3044	/*
3045	 * this is not compiled for now as satapkt_device.satadev_qdepth
3046	 * is being pulled out until NCQ support is later addressed
3047	 *
3048	 * nvp_queue_depth is initialized by the first NCQ command
3049	 * received.
3050	 */
3051	if (nvp->nvp_queue_depth == 1) {
3052		nvp->nvp_queue_depth =
3053		    spkt->satapkt_device.satadev_qdepth;
3054
3055		ASSERT(nvp->nvp_queue_depth > 1);
3056
3057		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3058		    "nv_process_queue: nvp_queue_depth set to %d",
3059		    nvp->nvp_queue_depth));
3060	}
3061#endif
3062
3063	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3064		/*
3065		 * max number of NCQ commands already active
3066		 */
3067		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3068
3069		return (SATA_TRAN_QUEUE_FULL);
3070	}
3071
3072	return (nv_start_common(nvp, spkt));
3073}
3074
3075
3076/*
3077 * configure INTx and legacy interrupts
3078 */
3079static int
3080nv_add_legacy_intrs(nv_ctl_t *nvc)
3081{
3082	dev_info_t	*devinfo = nvc->nvc_dip;
3083	int		actual, count = 0;
3084	int		x, y, rc, inum = 0;
3085
3086	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3087
3088	/*
3089	 * get number of interrupts
3090	 */
3091	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3092	if ((rc != DDI_SUCCESS) || (count == 0)) {
3093		NVLOG((NVDBG_INTR, nvc, NULL,
3094		    "ddi_intr_get_nintrs() failed, "
3095		    "rc %d count %d", rc, count));
3096
3097		return (DDI_FAILURE);
3098	}
3099
3100	/*
3101	 * allocate an array of interrupt handles
3102	 */
3103	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3104	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3105
3106	/*
3107	 * call ddi_intr_alloc()
3108	 */
3109	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3110	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3111
3112	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3113		nv_cmn_err(CE_WARN, nvc, NULL,
3114		    "ddi_intr_alloc() failed, rc %d", rc);
3115		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3116
3117		return (DDI_FAILURE);
3118	}
3119
3120	if (actual < count) {
3121		nv_cmn_err(CE_WARN, nvc, NULL,
3122		    "ddi_intr_alloc: requested: %d, received: %d",
3123		    count, actual);
3124
3125		goto failure;
3126	}
3127
3128	nvc->nvc_intr_cnt = actual;
3129
3130	/*
3131	 * get intr priority
3132	 */
3133	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3134	    DDI_SUCCESS) {
3135		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3136
3137		goto failure;
3138	}
3139
3140	/*
3141	 * Test for high level mutex
3142	 */
3143	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3144		nv_cmn_err(CE_WARN, nvc, NULL,
3145		    "nv_add_legacy_intrs: high level intr not supported");
3146
3147		goto failure;
3148	}
3149
3150	for (x = 0; x < actual; x++) {
3151		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3152		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3153			nv_cmn_err(CE_WARN, nvc, NULL,
3154			    "ddi_intr_add_handler() failed");
3155
3156			goto failure;
3157		}
3158	}
3159
3160	/*
3161	 * call ddi_intr_enable() for legacy interrupts
3162	 */
3163	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3164		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3165	}
3166
3167	return (DDI_SUCCESS);
3168
3169	failure:
3170	/*
3171	 * free allocated intr and nvc_htable
3172	 */
3173	for (y = 0; y < actual; y++) {
3174		(void) ddi_intr_free(nvc->nvc_htable[y]);
3175	}
3176
3177	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3178
3179	return (DDI_FAILURE);
3180}
3181
3182#ifdef	NV_MSI_SUPPORTED
3183/*
3184 * configure MSI interrupts
3185 */
3186static int
3187nv_add_msi_intrs(nv_ctl_t *nvc)
3188{
3189	dev_info_t	*devinfo = nvc->nvc_dip;
3190	int		count, avail, actual;
3191	int		x, y, rc, inum = 0;
3192
3193	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3194
3195	/*
3196	 * get number of interrupts
3197	 */
3198	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3199	if ((rc != DDI_SUCCESS) || (count == 0)) {
3200		nv_cmn_err(CE_WARN, nvc, NULL,
3201		    "ddi_intr_get_nintrs() failed, "
3202		    "rc %d count %d", rc, count);
3203
3204		return (DDI_FAILURE);
3205	}
3206
3207	/*
3208	 * get number of available interrupts
3209	 */
3210	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3211	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3212		nv_cmn_err(CE_WARN, nvc, NULL,
3213		    "ddi_intr_get_navail() failed, "
3214		    "rc %d avail %d", rc, avail);
3215
3216		return (DDI_FAILURE);
3217	}
3218
3219	if (avail < count) {
3220		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3222		    avail, count);
3223	}
3224
3225	/*
3226	 * allocate an array of interrupt handles
3227	 */
3228	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3229	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3230
3231	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3232	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3233
3234	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3235		nv_cmn_err(CE_WARN, nvc, NULL,
3236		    "ddi_intr_alloc() failed, rc %d", rc);
3237		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3238
3239		return (DDI_FAILURE);
3240	}
3241
3242	/*
3243	 * Use interrupt count returned or abort?
3244	 */
3245	if (actual < count) {
3246		NVLOG((NVDBG_INIT, nvc, NULL,
3247		    "Requested: %d, Received: %d", count, actual));
3248	}
3249
3250	nvc->nvc_intr_cnt = actual;
3251
3252	/*
3253	 * get priority for first msi, assume remaining are all the same
3254	 */
3255	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3256	    DDI_SUCCESS) {
3257		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3258
3259		goto failure;
3260	}
3261
3262	/*
3263	 * test for high level mutex
3264	 */
3265	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3266		nv_cmn_err(CE_WARN, nvc, NULL,
3267		    "nv_add_msi_intrs: high level intr not supported");
3268
3269		goto failure;
3270	}
3271
3272	/*
3273	 * Call ddi_intr_add_handler()
3274	 */
3275	for (x = 0; x < actual; x++) {
3276		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3277		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3278			nv_cmn_err(CE_WARN, nvc, NULL,
3279			    "ddi_intr_add_handler() failed");
3280
3281			goto failure;
3282		}
3283	}
3284
3285	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3286
3287	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3288		(void) ddi_intr_block_enable(nvc->nvc_htable,
3289		    nvc->nvc_intr_cnt);
3290	} else {
3291		/*
3292		 * Call ddi_intr_enable() for MSI non block enable
3293		 */
3294		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3295			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3296		}
3297	}
3298
3299	return (DDI_SUCCESS);
3300
3301	failure:
3302	/*
3303	 * free allocated intr and nvc_htable
3304	 */
3305	for (y = 0; y < actual; y++) {
3306		(void) ddi_intr_free(nvc->nvc_htable[y]);
3307	}
3308
3309	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3310
3311	return (DDI_FAILURE);
3312}
3313#endif
3314
3315
3316static void
3317nv_rem_intrs(nv_ctl_t *nvc)
3318{
3319	int x, i;
3320	nv_port_t *nvp;
3321
3322	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3323
3324	/*
3325	 * prevent controller from generating interrupts by
3326	 * masking them out.  This is an extra precaution.
3327	 */
3328	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3329		nvp = (&nvc->nvc_port[i]);
3330		mutex_enter(&nvp->nvp_mutex);
3331		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3332		mutex_exit(&nvp->nvp_mutex);
3333	}
3334
3335	/*
3336	 * disable all interrupts
3337	 */
3338	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3339	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3340		(void) ddi_intr_block_disable(nvc->nvc_htable,
3341		    nvc->nvc_intr_cnt);
3342	} else {
3343		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3344			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3345		}
3346	}
3347
3348	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3349		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3350		(void) ddi_intr_free(nvc->nvc_htable[x]);
3351	}
3352
3353	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3354}
3355
3356
3357/*
3358 * variable argument wrapper for cmn_err.  prefixes the instance and port
3359 * number if possible
3360 */
3361static void
3362nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3363{
3364	char port[NV_STRING_10];
3365	char inst[NV_STRING_10];
3366
3367	mutex_enter(&nv_log_mutex);
3368
3369	if (nvc) {
3370		(void) snprintf(inst, NV_STRING_10, "inst %d",
3371		    ddi_get_instance(nvc->nvc_dip));
3372	} else {
3373		inst[0] = '\0';
3374	}
3375
3376	if (nvp) {
3377		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3378	} else {
3379		port[0] = '\0';
3380	}
3381
3382	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3383	    (inst[0]|port[0] ? ": " :""));
3384
3385	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3386	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3387
3388	/*
3389	 * normally set to log to console but in some debug situations it
3390	 * may be useful to log only to a file.
3391	 */
3392	if (nv_log_to_console) {
3393		if (nv_prom_print) {
3394			prom_printf("%s\n", nv_log_buf);
3395		} else {
3396			cmn_err(ce, "%s", nv_log_buf);
3397		}
3398
3399
3400	} else {
3401		cmn_err(ce, "!%s", nv_log_buf);
3402	}
3403
3404	mutex_exit(&nv_log_mutex);
3405}
3406
3407
3408/*
3409 * wrapper for cmn_err
3410 */
3411static void
3412nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3413{
3414	va_list ap;
3415
3416	va_start(ap, fmt);
3417	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3418	va_end(ap);
3419}
3420
3421
3422#if defined(DEBUG)
3423/*
3424 * prefixes the instance and port number if possible to the debug message
3425 */
3426static void
3427nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3428{
3429	va_list ap;
3430
3431	if ((nv_debug_flags & flag) == 0) {
3432		return;
3433	}
3434
3435	va_start(ap, fmt);
3436	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3437	va_end(ap);
3438
3439	/*
3440	 * useful for some debugging situations
3441	 */
3442	if (nv_log_delay) {
3443		drv_usecwait(nv_log_delay);
3444	}
3445
3446}
3447#endif /* DEBUG */
3448
3449
3450/*
3451 * program registers which are common to all commands
3452 */
3453static void
3454nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3455{
3456	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3457	sata_pkt_t *spkt;
3458	sata_cmd_t *satacmd;
3459	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3460	uint8_t cmd, ncq = B_FALSE;
3461
3462	spkt = nv_slotp->nvslot_spkt;
3463	satacmd = &spkt->satapkt_cmd;
3464	cmd = satacmd->satacmd_cmd_reg;
3465
3466	ASSERT(nvp->nvp_slot);
3467
3468	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3469	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3470		ncq = B_TRUE;
3471	}
3472
3473	/*
3474	 * select the drive
3475	 */
3476	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3477
3478	/*
3479	 * make certain the drive selected
3480	 */
3481	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3482	    NV_SEC2USEC(5), 0) == B_FALSE) {
3483
3484		return;
3485	}
3486
3487	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3488
3489	case ATA_ADDR_LBA:
3490		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3491
3492		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3493		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3494		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3495		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3496
3497		break;
3498
3499	case ATA_ADDR_LBA28:
3500		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3501		    "ATA_ADDR_LBA28 mode"));
3502		/*
3503		 * NCQ only uses 48-bit addressing
3504		 */
3505		ASSERT(ncq != B_TRUE);
3506
3507		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3508		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3509		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3510		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3511
3512		break;
3513
3514	case ATA_ADDR_LBA48:
3515		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3516		    "ATA_ADDR_LBA48 mode"));
3517
3518		/*
3519		 * for NCQ, tag goes into count register and real sector count
3520		 * into features register.  The sata module does the translation
3521		 * in the satacmd.
3522		 */
3523		if (ncq == B_TRUE) {
3524			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
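			/*
			 * write the high-order (ext) byte of the sector
			 * count to the features register first, then the
			 * low-order byte
			 */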
3525			nv_put8(cmdhdl, nvp->nvp_feature,
3526			    satacmd->satacmd_features_reg_ext);
3527			nv_put8(cmdhdl, nvp->nvp_feature,
3528			    satacmd->satacmd_features_reg);
3529		} else {
3530			nv_put8(cmdhdl, nvp->nvp_count,
3531			    satacmd->satacmd_sec_count_msb);
3532			nv_put8(cmdhdl, nvp->nvp_count,
3533			    satacmd->satacmd_sec_count_lsb);
3534		}
3535
3536		/*
3537		 * send the high-order half first
3538		 */
3539		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3540		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3541		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3542		/*
3543		 * Send the low-order half
3544		 */
3545		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3546		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3547		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3548
3549		break;
3550
3551	case 0:
3552		/*
3553		 * non-media access commands such as identify and features
3554		 * take this path.
3555		 */
3556		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3557		nv_put8(cmdhdl, nvp->nvp_feature,
3558		    satacmd->satacmd_features_reg);
3559		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3560		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3561		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3562
3563		break;
3564
3565	default:
3566		break;
3567	}
3568
3569	ASSERT(nvp->nvp_slot);
3570}
3571
3572
3573/*
3574 * start a command that involves no media access
3575 */
3576static int
3577nv_start_nodata(nv_port_t *nvp, int slot)
3578{
3579	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3580	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3581	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3582	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3583
3584	nv_program_taskfile_regs(nvp, slot);
3585
3586	/*
3587	 * This next one sets the controller in motion
3588	 */
3589	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3590
3591	return (SATA_TRAN_ACCEPTED);
3592}
3593
3594
3595int
3596nv_bm_status_clear(nv_port_t *nvp)
3597{
3598	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3599	uchar_t	status, ret;
3600
3601	/*
3602	 * Get the current BM status
3603	 */
3604	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3605
3606	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3607
3608	/*
3609	 * Clear the latches (and preserve the other bits)
3610	 */
3611	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3612
3613	return (ret);
3614}
3615
3616
3617/*
3618 * program the bus master DMA engine with the PRD address for
3619 * the active slot command, and start the DMA engine.
3620 */
3621static void
3622nv_start_dma_engine(nv_port_t *nvp, int slot)
3623{
3624	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3625	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3626	uchar_t direction;
3627
3628	ASSERT(nv_slotp->nvslot_spkt != NULL);
3629
3630	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3631	    == SATA_DIR_READ) {
3632		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3633	} else {
3634		direction = BMICX_RWCON_READ_FROM_MEMORY;
3635	}
3636
3637	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3638	    "nv_start_dma_engine entered"));
3639
3640	/*
3641	 * reset the controller's interrupt and error status bits
3642	 */
3643	(void) nv_bm_status_clear(nvp);
3644
3645	/*
3646	 * program the PRD table physical start address
3647	 */
3648	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3649
3650	/*
3651	 * set the direction control and start the DMA controller
3652	 */
3653	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3654}
3655
3656/*
3657 * start dma command, either in or out
3658 */
3659static int
3660nv_start_dma(nv_port_t *nvp, int slot)
3661{
3662	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3663	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3664	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3665	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3666	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3667#ifdef NCQ
3668	uint8_t ncq = B_FALSE;
3669#endif
3670	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3671	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3672	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3673	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3674
3675	ASSERT(sg_count != 0);
3676
3677	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3678		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3679		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3680		    sata_cmdp->satacmd_num_dma_cookies);
3681
3682		return (NV_FAILURE);
3683	}
3684
3685	nv_program_taskfile_regs(nvp, slot);
3686
3687	/*
3688	 * start the drive in motion
3689	 */
3690	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
3691
3692	/*
3693	 * the drive starts processing the transaction when the cmd register
3694	 * is written.  This is done here before programming the DMA engine to
3695	 * parallelize and save some time.  In the event that the drive is ready
3696	 * before DMA, it will wait.
3697	 */
3698#ifdef NCQ
3699	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3700	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3701		ncq = B_TRUE;
3702	}
3703#endif
3704
3705	/*
3706	 * copy the PRD list to PRD table in DMA accessible memory
3707	 * so that the controller can access it.
3708	 */
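	/*
	 * each PRD entry is two 32-bit words: the physical base address of
	 * the segment followed by the byte count in the low 16 bits along
	 * with flag bits such as PRDE_EOT
	 */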
3709	for (idx = 0; idx < sg_count; idx++, srcp++) {
3710		uint32_t size;
3711
3712		ASSERT(srcp->dmac_size <= UINT16_MAX);
3713
3714		nv_put32(sghdl, dstp++, srcp->dmac_address);
3715
3716		size = srcp->dmac_size;
3717
3718		/*
		 * If this is a 40-bit address, copy bits 32-39 of the
		 * physical address into bits 16-23 of the PRD count.
3721		 */
3722		if (srcp->dmac_laddress > UINT32_MAX) {
3723			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
3724		}
3725
3726		/*
3727		 * set the end of table flag for the last entry
3728		 */
3729		if (idx == (sg_count - 1)) {
3730			size |= PRDE_EOT;
3731		}
3732
3733		nv_put32(sghdl, dstp++, size);
3734	}
3735
3736	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
3737	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
3738
3739	nv_start_dma_engine(nvp, slot);
3740
3741#ifdef NCQ
3742	/*
3743	 * optimization:  for SWNCQ, start DMA engine if this is the only
3744	 * command running.  Preliminary NCQ efforts indicated this needs
3745	 * more debugging.
3746	 *
3747	 * if (nvp->nvp_ncq_run <= 1)
3748	 */
3749
3750	if (ncq == B_FALSE) {
3751		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3752		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
3753		    " cmd = %X", non_ncq_commands++, cmd));
3754		nv_start_dma_engine(nvp, slot);
3755	} else {
3756		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
3757		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
3758	}
3759#endif /* NCQ */
3760
3761	return (SATA_TRAN_ACCEPTED);
3762}
3763
3764
3765/*
3766 * start a PIO data-in ATA command
3767 */
3768static int
3769nv_start_pio_in(nv_port_t *nvp, int slot)
3770{
3771
3772	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3773	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3774	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3775
3776	nv_program_taskfile_regs(nvp, slot);
3777
3778	/*
3779	 * This next one sets the drive in motion
3780	 */
3781	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3782
3783	return (SATA_TRAN_ACCEPTED);
3784}
3785
3786
3787/*
3788 * start a PIO data-out ATA command
3789 */
3790static int
3791nv_start_pio_out(nv_port_t *nvp, int slot)
3792{
3793	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3794	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3795	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3796
3797	nv_program_taskfile_regs(nvp, slot);
3798
3799	/*
3800	 * this next one sets the drive in motion
3801	 */
3802	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3803
3804	/*
3805	 * wait for the busy bit to settle
3806	 */
3807	NV_DELAY_NSEC(400);
3808
3809	/*
3810	 * wait for the drive to assert DRQ to send the first chunk
3811	 * of data. Have to busy wait because there's no interrupt for
3812	 * the first chunk. This is bad... uses a lot of cycles if the
3813	 * drive responds too slowly or if the wait loop granularity
3814	 * is too large. It's even worse if the drive is defective and
3815	 * the loop times out.
3816	 */
3817	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
3818	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
3819	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
3820	    4000000, 0) == B_FALSE) {
3821		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3822
3823		goto error;
3824	}
3825
3826	/*
3827	 * send the first block.
3828	 */
3829	nv_intr_pio_out(nvp, nv_slotp);
3830
3831	/*
3832	 * If nvslot_flags is not set to COMPLETE yet, then processing
3833	 * is OK so far, so return.  Otherwise, fall into error handling
3834	 * below.
3835	 */
3836	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
3837
3838		return (SATA_TRAN_ACCEPTED);
3839	}
3840
3841	error:
3842	/*
3843	 * there was an error so reset the device and complete the packet.
3844	 */
3845	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3846	nv_complete_io(nvp, spkt, 0);
3847	nv_reset(nvp);
3848
3849	return (SATA_TRAN_PORT_ERROR);
3850}
3851
3852
3853/*
3854 * Interrupt processing for a non-data ATA command.
3855 */
3856static void
3857nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
3858{
3859	uchar_t status;
3860	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3861	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3862	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3863	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3864
3865	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
3866
3867	status = nv_get8(cmdhdl, nvp->nvp_status);
3868
3869	/*
3870	 * check for errors
3871	 */
3872	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3873		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3874		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3875		    nvp->nvp_altstatus);
3876		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3877	} else {
3878		spkt->satapkt_reason = SATA_PKT_COMPLETED;
3879	}
3880
3881	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3882}
3883
3884
3885/*
3886 * ATA command, PIO data in
3887 */
3888static void
3889nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
3890{
3891	uchar_t	status;
3892	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3893	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3894	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3895	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3896	int count;
3897
3898	status = nv_get8(cmdhdl, nvp->nvp_status);
3899
3900	if (status & SATA_STATUS_BSY) {
3901		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3902		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3903		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3904		    nvp->nvp_altstatus);
3905		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3906		nv_reset(nvp);
3907
3908		return;
3909	}
3910
3911	/*
3912	 * check for errors
3913	 */
3914	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
3915	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
3916		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3917		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3918		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3919
3920		return;
3921	}
3922
3923	/*
3924	 * read the next chunk of data (if any)
3925	 */
3926	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
3927
3928	/*
3929	 * read count bytes
3930	 */
3931	ASSERT(count != 0);
3932
3933	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
3934	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
3935
3936	nv_slotp->nvslot_v_addr += count;
3937	nv_slotp->nvslot_byte_count -= count;
3938
3939
3940	if (nv_slotp->nvslot_byte_count != 0) {
3941		/*
3942		 * more to transfer.  Wait for next interrupt.
3943		 */
3944		return;
3945	}
3946
3947	/*
3948	 * transfer is complete. wait for the busy bit to settle.
3949	 */
3950	NV_DELAY_NSEC(400);
3951
3952	spkt->satapkt_reason = SATA_PKT_COMPLETED;
3953	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3954}
3955
3956
3957/*
3958 * ATA command PIO data out
3959 */
3960static void
3961nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
3962{
3963	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3964	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3965	uchar_t status;
3966	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3967	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3968	int count;
3969
3970	/*
3971	 * clear the IRQ
3972	 */
3973	status = nv_get8(cmdhdl, nvp->nvp_status);
3974
3975	if (status & SATA_STATUS_BSY) {
3976		/*
3977		 * this should not happen
3978		 */
3979		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3980		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3981		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3982		    nvp->nvp_altstatus);
3983		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3984
3985		return;
3986	}
3987
3988	/*
3989	 * check for errors
3990	 */
3991	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3992		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
3993		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3994		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3995
3996		return;
3997	}
3998
3999	/*
4000	 * this is the condition which signals that the drive is
4001	 * no longer ready to transfer.  The transfer likely completed
4002	 * successfully, but check that byte_count is zero to be
4003	 * sure.
4004	 */
4005	if ((status & SATA_STATUS_DRQ) == 0) {
4006
4007		if (nv_slotp->nvslot_byte_count == 0) {
4008			/*
4009			 * complete; successful transfer
4010			 */
4011			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4012		} else {
4013			/*
4014			 * error condition, incomplete transfer
4015			 */
4016			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4017			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4018		}
4019		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4020
4021		return;
4022	}
4023
4024	/*
4025	 * write the next chunk of data
4026	 */
4027	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4028
4029	/*
4030	 * write count bytes
4031	 */
4032
4033	ASSERT(count != 0);
4034
4035	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4036	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4037
4038	nv_slotp->nvslot_v_addr += count;
4039	nv_slotp->nvslot_byte_count -= count;
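
	/*
	 * the packet is completed on a later interrupt, once the device
	 * clears DRQ with byte_count at zero (handled above)
	 */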
4040}
4041
4042
4043/*
4044 * ATA command, DMA data in/out
4045 */
4046static void
4047nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4048{
4049	uchar_t status;
4050	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4051	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4052	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4053	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4054	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4055	uchar_t	bmicx;
4056	uchar_t bm_status;
4057
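	/*
	 * DMA transfers finish with a single interrupt, so the slot is
	 * marked complete up front regardless of the outcome below
	 */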
4058	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4059
4060	/*
4061	 * stop DMA engine.
4062	 */
4063	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4064	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4065
4066	/*
4067	 * get the status and clear the IRQ, and check for DMA error
4068	 */
4069	status = nv_get8(cmdhdl, nvp->nvp_status);
4070
4071	/*
4072	 * check for drive errors
4073	 */
4074	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4075		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4076		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4077		(void) nv_bm_status_clear(nvp);
4078
4079		return;
4080	}
4081
4082	bm_status = nv_bm_status_clear(nvp);
4083
4084	/*
4085	 * check for bus master errors
4086	 */
4087	if (bm_status & BMISX_IDERR) {
4088		spkt->satapkt_reason = SATA_PKT_RESET;
4089		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4090		    nvp->nvp_altstatus);
4091		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4092		nv_reset(nvp);
4093
4094		return;
4095	}
4096
4097	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4098}
4099
4100
4101/*
4102 * Wait for a register of a controller to achieve a specific state.
4103 * To return normally, all the bits in the first sub-mask must be ON,
4104 * all the bits in the second sub-mask must be OFF.
4105 * If timeout_usec microseconds pass without the controller achieving
4106 * the desired bit configuration, return FALSE; otherwise return TRUE.
4107 *
4108 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4109 * occur for the first 250 us, then switch over to a sleeping wait.
4110 *
4111 */
4112int
4113nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4114    int type_wait)
4115{
4116	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4117	hrtime_t end, cur, start_sleep, start;
4118	int first_time = B_TRUE;
4119	ushort_t val;
4120
4121	for (;;) {
4122		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4123
4124		if ((val & onbits) == onbits && (val & offbits) == 0) {
4125
4126			return (B_TRUE);
4127		}
4128
4129		cur = gethrtime();
4130
4131		/*
4132		 * store the start time and calculate the end
4133		 * time.  also calculate "start_sleep" which is
4134		 * the point after which the driver will stop busy
4135		 * waiting and change to sleep waiting.
4136		 */
4137		if (first_time) {
4138			first_time = B_FALSE;
4139			/*
4140			 * start and end are in nanoseconds
4141			 */
4142			start = cur;
4143			end = start + timeout_usec * 1000;
4144			/*
4145			 * start_sleep is 250 us after start
4146			 */
4147			start_sleep =  start + 250000;
4148
4149			if (servicing_interrupt()) {
4150				type_wait = NV_NOSLEEP;
4151			}
4152		}
4153
4154		if (cur > end) {
4155
4156			break;
4157		}
4158
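		/*
		 * past start_sleep, delay(1) gives up the CPU for a clock
		 * tick; otherwise busy wait in short increments
		 */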
4159		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4160#if ! defined(__lock_lint)
4161			delay(1);
4162#endif
4163		} else {
4164			drv_usecwait(nv_usec_delay);
4165		}
4166	}
4167
4168	return (B_FALSE);
4169}
4170
4171
4172/*
4173 * This is a slightly more complicated version of nv_wait() that also
4174 * checks for error conditions and bails out rather than looping
4175 * until the timeout is exceeded.
4176 *
4177 * hybrid waiting algorithm: if not in interrupt context, busy looping will
4178 * occur for the first 250 us, then switch over to a sleeping wait.
4179 */
4180int
4181nv_wait3(
4182	nv_port_t	*nvp,
4183	uchar_t		onbits1,
4184	uchar_t		offbits1,
4185	uchar_t		failure_onbits2,
4186	uchar_t		failure_offbits2,
4187	uchar_t		failure_onbits3,
4188	uchar_t		failure_offbits3,
4189	uint_t		timeout_usec,
4190	int		type_wait)
4191{
4192	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4193	hrtime_t end, cur, start_sleep, start;
4194	int first_time = B_TRUE;
4195	ushort_t val;
4196
4197	for (;;) {
4198		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4199
4200		/*
4201		 * check for expected condition
4202		 */
4203		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4204
4205			return (B_TRUE);
4206		}
4207
4208		/*
4209		 * check for error conditions
4210		 */
4211		if ((val & failure_onbits2) == failure_onbits2 &&
4212		    (val & failure_offbits2) == 0) {
4213
4214			return (B_FALSE);
4215		}
4216
4217		if ((val & failure_onbits3) == failure_onbits3 &&
4218		    (val & failure_offbits3) == 0) {
4219
4220			return (B_FALSE);
4221		}
4222
4223		/*
4224		 * store the start time and calculate the end
4225		 * time.  also calculate "start_sleep" which is
4226		 * the point after which the driver will stop busy
4227		 * waiting and change to sleep waiting.
4228		 */
4229		if (first_time) {
4230			first_time = B_FALSE;
4231			/*
4232			 * start and end are in nanoseconds
4233			 */
4234			cur = start = gethrtime();
4235			end = start + timeout_usec * 1000;
4236			/*
4237			 * start_sleep is 250 us after start
4238			 */
4239			start_sleep =  start + 250000;
4240
4241			if (servicing_interrupt()) {
4242				type_wait = NV_NOSLEEP;
4243			}
4244		} else {
4245			cur = gethrtime();
4246		}
4247
4248		if (cur > end) {
4249
4250			break;
4251		}
4252
4253		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4254#if ! defined(__lock_lint)
4255			delay(1);
4256#endif
4257		} else {
4258			drv_usecwait(nv_usec_delay);
4259		}
4260	}
4261
4262	return (B_FALSE);
4263}
4264
4265
4266/*
4267 * nv_check_link() checks whether the specified link is active, i.e. a
4268 * device is present and communicating.
4269 */
4270static boolean_t
4271nv_check_link(uint32_t sstatus)
4272{
4273	uint8_t det;
4274
4275	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4276
4277	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4278}
4279
4280
4281/*
4282 * nv_port_state_change() reports the state of the port to the
4283 * sata module by calling sata_hba_event_notify().  This
4284 * function is called any time the state of the port is changed.
4285 */
4286static void
4287nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4288{
4289	sata_device_t sd;
4290
4291	bzero((void *)&sd, sizeof (sata_device_t));
4292	sd.satadev_rev = SATA_DEVICE_REV;
4293	nv_copy_registers(nvp, &sd, NULL);
4294
4295	/*
4296	 * When NCQ is implemented, the sactive and snotific fields will
4297	 * need to be updated.
4298	 */
4299	sd.satadev_addr.cport = nvp->nvp_port_num;
4300	sd.satadev_addr.qual = addr_type;
4301	sd.satadev_state = state;
4302
4303	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4304}
4305
4306
4307/*
4308 * timeout processing:
4309 *
4310 * Check if any packets have crossed a timeout threshold.  If so, then
4311 * abort the packet.  This function is not NCQ aware.
4312 *
4313 * If reset was invoked in any other place than nv_sata_probe(), then
4314 * monitor for reset completion here.
4315 *
4316 */
4317static void
4318nv_timeout(void *arg)
4319{
4320	nv_port_t *nvp = arg;
4321	nv_slot_t *nv_slotp;
4322	int restart_timeout = B_FALSE;
4323
4324	mutex_enter(&nvp->nvp_mutex);
4325
4326	/*
4327	 * If the probe entry point is driving the reset and signature
4328	 * acquisition, just return.
4329	 */
4330	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
4331		goto finished;
4332	}
4333
4334	/*
4335	 * If the port is not in the init state, it likely
4336	 * means the link was lost while a timeout was active.
4337	 */
4338	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
4339		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4340		    "nv_timeout: port uninitialized"));
4341
4342		goto finished;
4343	}
4344
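	/*
	 * a reset is in progress on this port, so monitor for signature
	 * acquisition or link loss rather than packet timeouts
	 */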
4345	if (nvp->nvp_state & NV_PORT_RESET) {
4346		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4347		uint32_t sstatus;
4348
4349		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4350		    "nv_timeout(): port waiting for signature"));
4351
4352		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4353
4354		/*
4355		 * check for link presence.  If the link remains
4356		 * missing for more than 2 seconds, send a remove
4357		 * event and abort signature acquisition.
4358		 */
4359		if (nv_check_link(sstatus) == B_FALSE) {
4360			clock_t e_link_lost = ddi_get_lbolt();
4361
4362			if (nvp->nvp_link_lost_time == 0) {
4363				nvp->nvp_link_lost_time = e_link_lost;
4364			}
4365			if (TICK_TO_SEC(e_link_lost -
4366			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
4367				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4368				    "probe: intermittent link lost while"
4369				    " resetting"));
4370				restart_timeout = B_TRUE;
4371			} else {
4372				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4373				    "link lost during signature acquisition."
4374				    "  Giving up"));
4375				nv_port_state_change(nvp,
4376				    SATA_EVNT_DEVICE_DETACHED|
4377				    SATA_EVNT_LINK_LOST,
4378				    SATA_ADDR_CPORT, 0);
4379				nvp->nvp_state |= NV_PORT_HOTREMOVED;
4380				nvp->nvp_state &= ~NV_PORT_RESET;
4381			}
4382
4383			goto finished;
4384		} else {
4385
4386			nvp->nvp_link_lost_time = 0;
4387		}
4388
4389		nv_read_signature(nvp);
4390
4391		if (nvp->nvp_signature != 0) {
4392			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
4393				nvp->nvp_state |= NV_PORT_RESTORE;
4394				nv_port_state_change(nvp,
4395				    SATA_EVNT_DEVICE_RESET,
4396				    SATA_ADDR_DCPORT,
4397				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
4398			}
4399
4400			goto finished;
4401		}
4402
4403		/*
4404		 * Reset if more than 5 seconds has passed without
4405		 * acquiring a signature.
4406		 */
4407		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
4408			nv_reset(nvp);
4409		}
4410
4411		restart_timeout = B_TRUE;
4412		goto finished;
4413	}
4414
4415
4416	/*
4417	 * not yet NCQ aware
4418	 */
4419	nv_slotp = &(nvp->nvp_slot[0]);
4420
4421	/*
4422	 * nv_slotp can be NULL early on, before the slots are set up,
4423	 * or when a device was unexpectedly removed while a packet
4424	 * was active.
4425	 */
4426	if (nv_slotp == NULL) {
4427		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4428		    "nv_timeout: nv_slotp == NULL"));
4429
4430		goto finished;
4431	}
4432
4433	/*
4434	 * perform timeout checking and processing only if there is an
4435	 * active packet on the port
4436	 */
4437	if (nv_slotp->nvslot_spkt != NULL)  {
4438		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4439		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4440		uint8_t cmd = satacmd->satacmd_cmd_reg;
4441		uint64_t lba;
4442
4443#if ! defined(__lock_lint) && defined(DEBUG)
4444
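		/*
		 * reconstruct the 48-bit LBA from the FIS fields for the
		 * debug message below
		 */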
4445		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
4446		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
4447		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
4448		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
4449		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
4450		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
4451#endif
4452
4453		/*
4454		 * timeout not needed if there is a polling thread
4455		 */
4456		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
4457
4458			goto finished;
4459		}
4460
4461		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
4462		    spkt->satapkt_time) {
4463			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4464			    "abort timeout: "
4465			    "nvslot_stime: %ld max ticks till timeout: "
4466			    "%ld cur_time: %ld cmd=%x lba=%llu",
4467			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
4468			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
4469
4470			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
4471
4472		} else {
4473			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
4474			    " still in use so restarting timeout"));
4475		}
4476		restart_timeout = B_TRUE;
4477
4478	} else {
4479		/*
4480		 * there was no active packet, so do not re-enable timeout
4481		 */
4482		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4483		    "nv_timeout: no active packet so not re-arming timeout"));
4484	}
4485
4486	finished:
4487
4488	if (restart_timeout == B_TRUE) {
4489		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
4490		    drv_usectohz(NV_ONE_SEC));
4491	} else {
4492		nvp->nvp_timeout_id = 0;
4493	}
4494	mutex_exit(&nvp->nvp_mutex);
4495}
4496
4497
4498/*
4499 * enable or disable the 3 interrupt types the driver is
4500 * interested in: completion, hot add, and hot remove.
4501 */
4502static void
4503mcp04_set_intr(nv_port_t *nvp, int flag)
4504{
4505	nv_ctl_t *nvc = nvp->nvp_ctlp;
4506	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4507	uchar_t *bar5  = nvc->nvc_bar_addr[5];
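	/*
	 * the following tables are indexed by port number: entry 0 is
	 * the primary (PDEV) device, entry 1 the secondary (SDEV) device
	 */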
4508	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
4509	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
4510	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
4511	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
4512
4513	ASSERT(mutex_owned(&nvp->nvp_mutex));
4514
4515	/*
4516	 * controller level lock also required since access to an 8-bit
4517	 * interrupt register is shared between both channels.
4518	 */
4519	mutex_enter(&nvc->nvc_mutex);
4520
4521	if (flag & NV_INTR_CLEAR_ALL) {
4522		NVLOG((NVDBG_INTR, nvc, nvp,
4523		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
4524
4525		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
4526		    (uint8_t *)(nvc->nvc_mcp04_int_status));
4527
4528		if (intr_status & clear_all_bits[port]) {
4529
4530			nv_put8(nvc->nvc_bar_hdl[5],
4531			    (uint8_t *)(nvc->nvc_mcp04_int_status),
4532			    clear_all_bits[port]);
4533
4534			NVLOG((NVDBG_INTR, nvc, nvp,
4535			    "interrupt bits cleared %x",
4536			    intr_status & clear_all_bits[port]));
4537		}
4538	}
4539
4540	if (flag & NV_INTR_DISABLE) {
4541		NVLOG((NVDBG_INTR, nvc, nvp,
4542		    "mcp04_set_intr: NV_INTR_DISABLE"));
4543		int_en = nv_get8(bar5_hdl,
4544		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4545		int_en &= ~intr_bits[port];
4546		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4547		    int_en);
4548	}
4549
4550	if (flag & NV_INTR_ENABLE) {
4551		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
4552		int_en = nv_get8(bar5_hdl,
4553		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4554		int_en |= intr_bits[port];
4555		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4556		    int_en);
4557	}
4558
4559	mutex_exit(&nvc->nvc_mutex);
4560}
4561
4562
4563/*
4564 * enable or disable the 3 interrupts the driver is interested in:
4565 * completion interrupt, hot add, and hot remove interrupt.
4566 */
4567static void
4568mcp55_set_intr(nv_port_t *nvp, int flag)
4569{
4570	nv_ctl_t *nvc = nvp->nvp_ctlp;
4571	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4572	uint16_t intr_bits =
4573	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
4574	uint16_t int_en;
4575
4576	ASSERT(mutex_owned(&nvp->nvp_mutex));
4577
4578	NVLOG((NVDBG_HOT, nvc, nvp, "mcp55_set_intr: enter flag: %d", flag));
4579
4580	if (flag & NV_INTR_CLEAR_ALL) {
4581		NVLOG((NVDBG_INTR, nvc, nvp,
4582		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
4583		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
4584	}
4585
4586	if (flag & NV_INTR_ENABLE) {
4587		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
4588		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4589		int_en |= intr_bits;
4590		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4591	}
4592
4593	if (flag & NV_INTR_DISABLE) {
4594		NVLOG((NVDBG_INTR, nvc, nvp,
4595		    "mcp55_set_intr: NV_INTR_DISABLE"));
4596		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4597		int_en &= ~intr_bits;
4598		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4599	}
4600}
4601
4602
4603/*
4604 * The PM functions for suspend and resume are incomplete and need additional
4605 * work.  They may or may not work in the current state.
4606 */
4607static void
4608nv_resume(nv_port_t *nvp)
4609{
4610	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
4611
4612	mutex_enter(&nvp->nvp_mutex);
4613
4614	if (nvp->nvp_state & NV_PORT_INACTIVE) {
4615		mutex_exit(&nvp->nvp_mutex);
4616
4617		return;
4618	}
4619
4620	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
4621
4622	/*
4623	 * Power to the port and the drive may have been removed, and/or
4624	 * a drive may have been added or removed while suspended.  Force
4625	 * a reset, which will cause a probe and re-establish any state
4626	 * needed on the drive.
4627	 */
4629
4630	nv_reset(nvp);
4631
4632	mutex_exit(&nvp->nvp_mutex);
4633}
4634
4635/*
4636 * The PM functions for suspend and resume are incomplete and need additional
4637 * work.  They may or may not work in the current state.
4638 */
4639static void
4640nv_suspend(nv_port_t *nvp)
4641{
4642	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
4643
4644	mutex_enter(&nvp->nvp_mutex);
4645
4646	if (nvp->nvp_state & NV_PORT_INACTIVE) {
4647		mutex_exit(&nvp->nvp_mutex);
4648
4649		return;
4650	}
4651
4652	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4653
4654	/*
4655	 * Power to the port and the drive may be removed while suspended,
4656	 * and/or a drive may be added or removed.  A reset is forced in
4657	 * nv_resume() to probe and re-establish any state needed on the
4658	 * drive.
4659	 */
4661
4662	mutex_exit(&nvp->nvp_mutex);
4663}
4664
4665
4666static void
4667nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
4668{
4669	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4670	sata_cmd_t *scmd = &spkt->satapkt_cmd;
4671	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4672	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4673	uchar_t status;
4674	struct sata_cmd_flags flags;
4675
4676	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
4677
4678	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4679	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
4680	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
4681
4682	if (spkt == NULL) {
4683
4684		return;
4685	}
4686
4687	/*
4688	 * in the error case, implicitly enable the return of the
4689	 * registers needed for error handling.
4690	 */
4691	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
4692	    nvp->nvp_altstatus);
4693
4694	flags = scmd->satacmd_flags;
4695
4696	if (status & SATA_STATUS_ERR) {
4697		flags.sata_copy_out_lba_low_msb = B_TRUE;
4698		flags.sata_copy_out_lba_mid_msb = B_TRUE;
4699		flags.sata_copy_out_lba_high_msb = B_TRUE;
4700		flags.sata_copy_out_lba_low_lsb = B_TRUE;
4701		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
4702		flags.sata_copy_out_lba_high_lsb = B_TRUE;
4703		flags.sata_copy_out_error_reg = B_TRUE;
4704		flags.sata_copy_out_sec_count_msb = B_TRUE;
4705		flags.sata_copy_out_sec_count_lsb = B_TRUE;
4706		scmd->satacmd_status_reg = status;
4707	}
4708
4709	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
4710
4711		/*
4712		 * set HOB so that high byte will be read
4713		 */
4714		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
4715
4716		/*
4717		 * get the requested high bytes
4718		 */
4719		if (flags.sata_copy_out_sec_count_msb) {
4720			scmd->satacmd_sec_count_msb =
4721			    nv_get8(cmdhdl, nvp->nvp_count);
4722		}
4723
4724		if (flags.sata_copy_out_lba_low_msb) {
4725			scmd->satacmd_lba_low_msb =
4726			    nv_get8(cmdhdl, nvp->nvp_sect);
4727		}
4728
4729		if (flags.sata_copy_out_lba_mid_msb) {
4730			scmd->satacmd_lba_mid_msb =
4731			    nv_get8(cmdhdl, nvp->nvp_lcyl);
4732		}
4733
4734		if (flags.sata_copy_out_lba_high_msb) {
4735			scmd->satacmd_lba_high_msb =
4736			    nv_get8(cmdhdl, nvp->nvp_hcyl);
4737		}
4738	}
4739
4740	/*
4741	 * disable HOB so that low byte is read
4742	 */
4743	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
4744
4745	/*
4746	 * get the requested low bytes
4747	 */
4748	if (flags.sata_copy_out_sec_count_lsb) {
4749		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
4750	}
4751
4752	if (flags.sata_copy_out_lba_low_lsb) {
4753		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
4754	}
4755
4756	if (flags.sata_copy_out_lba_mid_lsb) {
4757		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
4758	}
4759
4760	if (flags.sata_copy_out_lba_high_lsb) {
4761		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
4762	}
4763
4764	/*
4765	 * get the device register if requested
4766	 */
4767	if (flags.sata_copy_out_device_reg) {
4768		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
4769	}
4770
4771	/*
4772	 * get the error register if requested
4773	 */
4774	if (flags.sata_copy_out_error_reg) {
4775		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4776	}
4777}
4778
4779
4780/*
4781 * Hot plug and remove interrupts can occur when the device is reset.  Just
4782 * masking the interrupt doesn't always work well because if a
4783 * different interrupt arrives on the other port, the driver can still
4784 * end up checking the state of the other port and discover the hot
4785 * interrupt flag is set even though it was masked.  Checking for recent
4786 * reset activity and then ignoring the interrupt turns out to be easiest.
4787 */
4788static void
4789nv_report_add_remove(nv_port_t *nvp, int flags)
4790{
4791	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4792	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
4793	uint32_t sstatus;
4794	int i;
4795
4796	/*
4797	 * If a reset occurred within the last second, ignore the
4798	 * interrupt.  This somewhat heavy-handed clamping should be
4799	 * reworked and improved.
4800	 */
4801	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
4802		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove():"
4803		    " ignoring plug interrupt, reset was %dms ago",
4804		    TICK_TO_MSEC(time_diff)));
4805
4806		return;
4807	}
4808
4809	/*
4810	 * wait up to 1ms for sstatus to settle and reflect the true
4811	 * status of the port.  Failure to do so can create confusion
4812	 * in probe, where the incorrect sstatus value can still
4813	 * persist.
4814	 */
4815	for (i = 0; i < 1000; i++) {
4816		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4817
4818		if ((flags == NV_PORT_HOTREMOVED) &&
4819		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
4820		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4821			break;
4822		}
4823
4824		if ((flags != NV_PORT_HOTREMOVED) &&
4825		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
4826		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4827			break;
4828		}
4829		drv_usecwait(1);
4830	}
4831
4832	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4833	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
4834
4835	if (flags == NV_PORT_HOTREMOVED) {
4836		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4837		    "nv_report_add_remove() hot removed"));
4838		nv_port_state_change(nvp,
4839		    SATA_EVNT_DEVICE_DETACHED,
4840		    SATA_ADDR_CPORT, 0);
4841
4842		nvp->nvp_state |= NV_PORT_HOTREMOVED;
4843	} else {
4844		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4845		    "nv_report_add_remove() hot plugged"));
4846		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
4847		    SATA_ADDR_CPORT, 0);
4848	}
4849}
4850