nv_sata.c revision 10823:8e74955a6eca
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *
29 * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
30 * based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
36 * and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device.  mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible.  Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
48 * be supported but changes to these states are apparently are ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner than later,
51 * that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required.  However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, that power
56 * can be removed to the device, and the driver cannot rely on BIOS to
57 * have reset any state.  For the time being, there is no known
58 * non-default configurations that need to be programmed.  This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team.  If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66#include <sys/scsi/scsi.h>
67#include <sys/pci.h>
68#include <sys/byteorder.h>
69#include <sys/sunddi.h>
70#include <sys/sata/sata_hba.h>
71#ifdef SGPIO_SUPPORT
72#include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73#include <sys/devctl.h>
74#include <sys/sdt.h>
75#endif
76#include <sys/sata/adapters/nv_sata/nv_sata.h>
77#include <sys/disp.h>
78#include <sys/note.h>
79#include <sys/promif.h>
80
81
82/*
83 * Function prototypes for driver entry points
84 */
85static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87static int nv_quiesce(dev_info_t *dip);
88static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89    void *arg, void **result);
90
91/*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102/*
103 * Local function prototypes
104 */
105static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108#ifdef NV_MSI_SUPPORTED
109static int nv_add_msi_intrs(nv_ctl_t *nvc);
110#endif
111static void nv_rem_intrs(nv_ctl_t *nvc);
112static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113static int nv_start_nodata(nv_port_t *nvp, int slot);
114static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115static int nv_start_pio_in(nv_port_t *nvp, int slot);
116static int nv_start_pio_out(nv_port_t *nvp, int slot);
117static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121static int nv_start_dma(nv_port_t *nvp, int slot);
122static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
124static void nv_uninit_ctl(nv_ctl_t *nvc);
125static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
127static void nv_uninit_port(nv_port_t *nvp);
128static int nv_init_port(nv_port_t *nvp);
129static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
130static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131#ifdef NCQ
132static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
133#endif
134static void nv_start_dma_engine(nv_port_t *nvp, int slot);
135static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
136    int state);
137static void nv_common_reg_init(nv_ctl_t *nvc);
138static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
139static void nv_reset(nv_port_t *nvp);
140static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
141static void nv_timeout(void *);
142static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
143static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
144static void nv_read_signature(nv_port_t *nvp);
145static void mcp5x_set_intr(nv_port_t *nvp, int flag);
146static void ck804_set_intr(nv_port_t *nvp, int flag);
147static void nv_resume(nv_port_t *nvp);
148static void nv_suspend(nv_port_t *nvp);
149static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
150static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
151    int flag);
152static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
153    sata_pkt_t *spkt);
154static void nv_report_add_remove(nv_port_t *nvp, int flags);
155static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
156static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
157    uchar_t failure_onbits2, uchar_t failure_offbits2,
158    uchar_t failure_onbits3, uchar_t failure_offbits3,
159    uint_t timeout_usec, int type_wait);
160static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
161    uint_t timeout_usec, int type_wait);
162static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
163static void nv_init_port_link_processing(nv_ctl_t *nvc);
164static void nv_setup_timeout(nv_port_t *nvp, int time);
165static void nv_monitor_reset(nv_port_t *nvp);
166static int nv_bm_status_clear(nv_port_t *nvp);
167
168#ifdef SGPIO_SUPPORT
169static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
170static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
171static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
172    cred_t *credp, int *rvalp);
173
174static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
175static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
176    uint32_t *cbpp);
177static int nv_sgp_init(nv_ctl_t *nvc);
178static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
179static int nv_sgp_csr_read(nv_ctl_t *nvc);
180static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
181static int nv_sgp_write_data(nv_ctl_t *nvc);
182static void nv_sgp_activity_led_ctl(void *arg);
183static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
184static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
185static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
186static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
187static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
188static void nv_sgp_cleanup(nv_ctl_t *nvc);
189#endif
190
191
192/*
193 * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
194 * Verify if needed if ported to other ISA.
195 */
196static ddi_dma_attr_t buffer_dma_attr = {
197	DMA_ATTR_V0,		/* dma_attr_version */
198	0,			/* dma_attr_addr_lo: lowest bus address */
199	0xffffffffull,		/* dma_attr_addr_hi: */
200	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
201	4,			/* dma_attr_align */
202	1,			/* dma_attr_burstsizes. */
203	1,			/* dma_attr_minxfer */
204	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
205	0xffffffffull,		/* dma_attr_seg */
206	NV_DMA_NSEGS,		/* dma_attr_sgllen */
207	512,			/* dma_attr_granular */
208	0,			/* dma_attr_flags */
209};
210static ddi_dma_attr_t buffer_dma_40bit_attr = {
211	DMA_ATTR_V0,		/* dma_attr_version */
212	0,			/* dma_attr_addr_lo: lowest bus address */
213	0xffffffffffull,	/* dma_attr_addr_hi: */
214	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
215	4,			/* dma_attr_align */
216	1,			/* dma_attr_burstsizes. */
217	1,			/* dma_attr_minxfer */
218	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
219	0xffffffffull,		/* dma_attr_seg */
220	NV_DMA_NSEGS,		/* dma_attr_sgllen */
221	512,			/* dma_attr_granular */
222	0,			/* dma_attr_flags */
223};
224
225
226/*
227 * DMA attributes for PRD tables
228 */
229ddi_dma_attr_t nv_prd_dma_attr = {
230	DMA_ATTR_V0,		/* dma_attr_version */
231	0,			/* dma_attr_addr_lo */
232	0xffffffffull,		/* dma_attr_addr_hi */
233	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
234	4,			/* dma_attr_align */
235	1,			/* dma_attr_burstsizes */
236	1,			/* dma_attr_minxfer */
237	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
238	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
239	1,			/* dma_attr_sgllen */
240	1,			/* dma_attr_granular */
241	0			/* dma_attr_flags */
242};
243
244/*
245 * Device access attributes
246 */
247static ddi_device_acc_attr_t accattr = {
248    DDI_DEVICE_ATTR_V0,
249    DDI_STRUCTURE_LE_ACC,
250    DDI_STRICTORDER_ACC
251};
252
253
254#ifdef SGPIO_SUPPORT
255static struct cb_ops nv_cb_ops = {
256	nv_open,		/* open */
257	nv_close,		/* close */
258	nodev,			/* strategy (block) */
259	nodev,			/* print (block) */
260	nodev,			/* dump (block) */
261	nodev,			/* read */
262	nodev,			/* write */
263	nv_ioctl,		/* ioctl */
264	nodev,			/* devmap */
265	nodev,			/* mmap */
266	nodev,			/* segmap */
267	nochpoll,		/* chpoll */
268	ddi_prop_op,		/* prop_op */
269	NULL,			/* streams */
270	D_NEW | D_MP |
271	D_64BIT | D_HOTPLUG,	/* flags */
272	CB_REV			/* rev */
273};
274#endif  /* SGPIO_SUPPORT */
275
276
277static struct dev_ops nv_dev_ops = {
278	DEVO_REV,		/* devo_rev */
279	0,			/* refcnt  */
280	nv_getinfo,		/* info */
281	nulldev,		/* identify */
282	nulldev,		/* probe */
283	nv_attach,		/* attach */
284	nv_detach,		/* detach */
285	nodev,			/* no reset */
286#ifdef SGPIO_SUPPORT
287	&nv_cb_ops,		/* driver operations */
288#else
289	(struct cb_ops *)0,	/* driver operations */
290#endif
291	NULL,			/* bus operations */
292	NULL,			/* power */
293	nv_quiesce		/* quiesce */
294};
295
296
297/*
298 * Request Sense CDB for ATAPI
299 */
300static const uint8_t nv_rqsense_cdb[16] = {
301	SCMD_REQUEST_SENSE,
302	0,
303	0,
304	0,
305	SATA_ATAPI_MIN_RQSENSE_LEN,
306	0,
307	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
308};
309
310
311static sata_tran_hotplug_ops_t nv_hotplug_ops;
312
313extern struct mod_ops mod_driverops;
314
315static  struct modldrv modldrv = {
316	&mod_driverops,	/* driverops */
317	"Nvidia ck804/mcp51/mcp55 HBA",
318	&nv_dev_ops,	/* driver ops */
319};
320
321static  struct modlinkage modlinkage = {
322	MODREV_1,
323	&modldrv,
324	NULL
325};
326
327
328/*
329 * Wait for a signature.
330 * If this variable is non-zero, the driver will wait for a device signature
331 * before reporting a device reset to the sata module.
332 * Some (most?) drives will not process commands sent to them before D2H FIS
333 * is sent to a host.
334 */
335int nv_wait_for_signature = 1;
336
337/*
338 * Check for a signature availability.
339 * If this variable is non-zero, the driver will check task file error register
340 * for indication of a signature availability before reading a signature.
341 * Task file error register bit 0 set to 1 indicates that the drive
342 * is ready and it has sent the D2H FIS with a signature.
343 * This behavior of the error register is not reliable in the mcp5x controller.
344 */
345int nv_check_tfr_error = 0;
346
347/*
348 * Max signature acquisition time, in milliseconds.
349 * The driver will try to acquire a device signature within specified time and
350 * quit acquisition operation if signature was not acquired.
351 */
352long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;
353
354/*
355 * If this variable is non-zero, the driver will wait for a signature in the
356 * nv_monitor_reset function without any time limit.
357 * Used for debugging and drive evaluation.
358 */
359int nv_wait_here_forever = 0;
360
361/*
362 * Reset after hotplug.
363 * If this variable is non-zero, driver will reset device after hotplug
364 * (device attached) interrupt.
365 * If the variable is zero, driver will not reset the new device nor will it
366 * try to read device signature.
367 * Chipset is generating a hotplug (device attached) interrupt with a delay, so
368 * the device should have already sent the D2H FIS with the signature.
369 */
370int nv_reset_after_hotplug = 1;
371
372/*
373 * Delay after device hotplug.
374 * It specifies the time between detecting a hotplugged device and sending
375 * a notification to the SATA module.
376 * It is used when device is not reset after hotpugging and acquiring signature
377 * may be unreliable. The delay should be long enough for a device to become
378 * ready to accept commands.
379 */
380int nv_hotplug_delay = NV_HOTPLUG_DELAY;
381
382
383/*
384 * Maximum number of consecutive interrupts processed in the loop in the
385 * single invocation of the port interrupt routine.
386 */
387int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
388
389
390
391/*
392 * wait between checks of reg status
393 */
394int nv_usec_delay = NV_WAIT_REG_CHECK;
395
396/*
397 * The following is needed for nv_vcmn_err()
398 */
399static kmutex_t nv_log_mutex; /* protects nv_log_buf */
400static char nv_log_buf[NV_STRING_512];
401int nv_debug_flags = NVDBG_ALWAYS;
402int nv_log_to_console = B_FALSE;
403
404int nv_log_delay = 0;
405int nv_prom_print = B_FALSE;
406
407/*
408 * for debugging
409 */
410#ifdef DEBUG
411int ncq_commands = 0;
412int non_ncq_commands = 0;
413#endif
414
415/*
416 * Opaque state pointer to be initialized by ddi_soft_state_init()
417 */
418static void *nv_statep	= NULL;
419
420/*
421 * Map from CBP to shared space
422 *
423 * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
424 * Control Block Pointer as well as the corresponding Control Block) that
425 * is shared across all driver instances associated with that part.  The
426 * Control Block is used to update and query the LED state for the devices
427 * on the controllers associated with those instances.  There is also some
428 * driver state (called the 'common' area here) associated with each SGPIO
429 * Control Block.  The nv_sgp_cpb2cmn is used to map a given CBP to its
430 * control area.
431 *
432 * The driver can also use this mapping array to determine whether the
433 * common area for a given CBP has been initialized, and, if it isn't
434 * initialized, initialize it.
435 *
436 * When a driver instance with a CBP value that is already in the array is
437 * initialized, it will use the pointer to the previously initialized common
438 * area associated with that SGPIO CBP value, rather than initialize it
439 * itself.
440 *
441 * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
442 */
443#ifdef SGPIO_SUPPORT
444static kmutex_t nv_sgp_c2c_mutex;
445static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
446#endif
447
448/* We still have problems in 40-bit DMA support, so disable it by default */
449int nv_sata_40bit_dma = B_FALSE;
450
451static sata_tran_hotplug_ops_t nv_hotplug_ops = {
452	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
453	nv_sata_activate,	/* activate port. cfgadm -c connect */
454	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
455};
456
457
458/*
459 *  nv module initialization
460 */
461int
462_init(void)
463{
464	int	error;
465#ifdef SGPIO_SUPPORT
466	int	i;
467#endif
468
469	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
470
471	if (error != 0) {
472
473		return (error);
474	}
475
476	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
477#ifdef SGPIO_SUPPORT
478	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
479
480	for (i = 0; i < NV_MAX_CBPS; i++) {
481		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
482		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
483	}
484#endif
485
486	if ((error = sata_hba_init(&modlinkage)) != 0) {
487		ddi_soft_state_fini(&nv_statep);
488		mutex_destroy(&nv_log_mutex);
489
490		return (error);
491	}
492
493	error = mod_install(&modlinkage);
494	if (error != 0) {
495		sata_hba_fini(&modlinkage);
496		ddi_soft_state_fini(&nv_statep);
497		mutex_destroy(&nv_log_mutex);
498
499		return (error);
500	}
501
502	return (error);
503}
504
505
506/*
507 * nv module uninitialize
508 */
509int
510_fini(void)
511{
512	int	error;
513
514	error = mod_remove(&modlinkage);
515
516	if (error != 0) {
517		return (error);
518	}
519
520	/*
521	 * remove the resources allocated in _init()
522	 */
523	mutex_destroy(&nv_log_mutex);
524#ifdef SGPIO_SUPPORT
525	mutex_destroy(&nv_sgp_c2c_mutex);
526#endif
527	sata_hba_fini(&modlinkage);
528	ddi_soft_state_fini(&nv_statep);
529
530	return (error);
531}
532
533
534/*
535 * nv _info entry point
536 */
537int
538_info(struct modinfo *modinfop)
539{
540	return (mod_info(&modlinkage, modinfop));
541}
542
543
544/*
545 * these wrappers for ddi_{get,put}8 are for observability
546 * with dtrace
547 */
548#ifdef DEBUG
549
550static void
551nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
552{
553	ddi_put8(handle, dev_addr, value);
554}
555
556static void
557nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
558{
559	ddi_put32(handle, dev_addr, value);
560}
561
562static uint32_t
563nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
564{
565	return (ddi_get32(handle, dev_addr));
566}
567
568static void
569nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
570{
571	ddi_put16(handle, dev_addr, value);
572}
573
574static uint16_t
575nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
576{
577	return (ddi_get16(handle, dev_addr));
578}
579
580static uint8_t
581nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
582{
583	return (ddi_get8(handle, dev_addr));
584}
585
586#else
587
588#define	nv_put8 ddi_put8
589#define	nv_put32 ddi_put32
590#define	nv_get32 ddi_get32
591#define	nv_put16 ddi_put16
592#define	nv_get16 ddi_get16
593#define	nv_get8 ddi_get8
594
595#endif
596
597
598/*
599 * Driver attach
600 */
601static int
602nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
603{
604	int status, attach_state, intr_types, bar, i, command;
605	int inst = ddi_get_instance(dip);
606	ddi_acc_handle_t pci_conf_handle;
607	nv_ctl_t *nvc;
608	uint8_t subclass;
609	uint32_t reg32;
610#ifdef SGPIO_SUPPORT
611	pci_regspec_t *regs;
612	int rlen;
613#endif
614
615	switch (cmd) {
616
617	case DDI_ATTACH:
618
619		NVLOG((NVDBG_INIT, NULL, NULL,
620		    "nv_attach(): DDI_ATTACH inst %d", inst));
621
622		attach_state = ATTACH_PROGRESS_NONE;
623
624		status = ddi_soft_state_zalloc(nv_statep, inst);
625
626		if (status != DDI_SUCCESS) {
627			break;
628		}
629
630		nvc = ddi_get_soft_state(nv_statep, inst);
631
632		nvc->nvc_dip = dip;
633
634		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
635
636		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
637			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
638			    PCI_CONF_REVID);
639			NVLOG((NVDBG_INIT, NULL, NULL,
640			    "inst %d: silicon revid is %x nv_debug_flags=%x",
641			    inst, nvc->nvc_revid, nv_debug_flags));
642		} else {
643			break;
644		}
645
646		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
647
648		/*
649		 * Set the PCI command register: enable IO/MEM/Master.
650		 */
651		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
652		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
653		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
654
655		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
656
657		if (subclass & PCI_MASS_RAID) {
658			cmn_err(CE_WARN,
659			    "attach failed: RAID mode not supported");
660			break;
661		}
662
663		/*
664		 * the 6 bars of the controller are:
665		 * 0: port 0 task file
666		 * 1: port 0 status
667		 * 2: port 1 task file
668		 * 3: port 1 status
669		 * 4: bus master for both ports
670		 * 5: extended registers for SATA features
671		 */
672		for (bar = 0; bar < 6; bar++) {
673			status = ddi_regs_map_setup(dip, bar + 1,
674			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
675			    &nvc->nvc_bar_hdl[bar]);
676
677			if (status != DDI_SUCCESS) {
678				NVLOG((NVDBG_INIT, nvc, NULL,
679				    "ddi_regs_map_setup failure for bar"
680				    " %d status = %d", bar, status));
681				break;
682			}
683		}
684
685		attach_state |= ATTACH_PROGRESS_BARS;
686
687		/*
688		 * initialize controller structures
689		 */
690		status = nv_init_ctl(nvc, pci_conf_handle);
691
692		if (status == NV_FAILURE) {
693			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
694
695			break;
696		}
697
698		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699
700		/*
701		 * initialize mutexes
702		 */
703		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705
706		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707
708		/*
709		 * get supported interrupt types
710		 */
711		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712		    DDI_SUCCESS) {
713			nv_cmn_err(CE_WARN, nvc, NULL,
714			    "!ddi_intr_get_supported_types failed");
715			NVLOG((NVDBG_INIT, nvc, NULL,
716			    "interrupt supported types failed"));
717
718			break;
719		}
720
721		NVLOG((NVDBG_INIT, nvc, NULL,
722		    "ddi_intr_get_supported_types() returned: 0x%x",
723		    intr_types));
724
725#ifdef NV_MSI_SUPPORTED
726		if (intr_types & DDI_INTR_TYPE_MSI) {
727			NVLOG((NVDBG_INIT, nvc, NULL,
728			    "using MSI interrupt type"));
729
730			/*
731			 * Try MSI first, but fall back to legacy if MSI
732			 * attach fails
733			 */
734			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
735				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
736				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
737				NVLOG((NVDBG_INIT, nvc, NULL,
738				    "MSI interrupt setup done"));
739			} else {
740				nv_cmn_err(CE_CONT, nvc, NULL,
741				    "!MSI registration failed "
742				    "will try Legacy interrupts");
743			}
744		}
745#endif
746
747		/*
748		 * Either the MSI interrupt setup has failed or only
749		 * the fixed interrupts are available on the system.
750		 */
751		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
752		    (intr_types & DDI_INTR_TYPE_FIXED)) {
753
754			NVLOG((NVDBG_INIT, nvc, NULL,
755			    "using Legacy interrupt type"));
756
757			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
758				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
759				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
760				NVLOG((NVDBG_INIT, nvc, NULL,
761				    "Legacy interrupt setup done"));
762			} else {
763				nv_cmn_err(CE_WARN, nvc, NULL,
764				    "!legacy interrupt setup failed");
765				NVLOG((NVDBG_INIT, nvc, NULL,
766				    "legacy interrupt setup failed"));
767				break;
768			}
769		}
770
771		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
772			NVLOG((NVDBG_INIT, nvc, NULL,
773			    "no interrupts registered"));
774			break;
775		}
776
777#ifdef SGPIO_SUPPORT
778		/*
779		 * save off the controller number
780		 */
781		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
782		    "reg", (caddr_t)&regs, &rlen);
783		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
784		kmem_free(regs, rlen);
785
786		/*
787		 * initialize SGPIO
788		 */
789		nv_sgp_led_init(nvc, pci_conf_handle);
790#endif	/* SGPIO_SUPPORT */
791
792		/*
793		 * Initiate link processing and device identification
794		 */
795		nv_init_port_link_processing(nvc);
796		/*
797		 * attach to sata module
798		 */
799		if (sata_hba_attach(nvc->nvc_dip,
800		    &nvc->nvc_sata_hba_tran,
801		    DDI_ATTACH) != DDI_SUCCESS) {
802			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
803
804			break;
805		}
806
807		pci_config_teardown(&pci_conf_handle);
808
809		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
810
811		return (DDI_SUCCESS);
812
813	case DDI_RESUME:
814
815		nvc = ddi_get_soft_state(nv_statep, inst);
816
817		NVLOG((NVDBG_INIT, nvc, NULL,
818		    "nv_attach(): DDI_RESUME inst %d", inst));
819
820		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
821			return (DDI_FAILURE);
822		}
823
824		/*
825		 * Set the PCI command register: enable IO/MEM/Master.
826		 */
827		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
828		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
829		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
830
831		/*
832		 * Need to set bit 2 to 1 at config offset 0x50
833		 * to enable access to the bar5 registers.
834		 */
835		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
836
837		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
838			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
839			    reg32 | NV_BAR5_SPACE_EN);
840		}
841
842		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
843
844		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
845			nv_resume(&(nvc->nvc_port[i]));
846		}
847
848		pci_config_teardown(&pci_conf_handle);
849
850		return (DDI_SUCCESS);
851
852	default:
853		return (DDI_FAILURE);
854	}
855
856
857	/*
858	 * DDI_ATTACH failure path starts here
859	 */
860
861	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
862		nv_rem_intrs(nvc);
863	}
864
865	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
866		/*
867		 * Remove timers
868		 */
869		int port = 0;
870		nv_port_t *nvp;
871
872		for (; port < NV_MAX_PORTS(nvc); port++) {
873			nvp = &(nvc->nvc_port[port]);
874			if (nvp->nvp_timeout_id != 0) {
875				(void) untimeout(nvp->nvp_timeout_id);
876			}
877		}
878	}
879
880	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
881		mutex_destroy(&nvc->nvc_mutex);
882	}
883
884	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
885		nv_uninit_ctl(nvc);
886	}
887
888	if (attach_state & ATTACH_PROGRESS_BARS) {
889		while (--bar >= 0) {
890			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
891		}
892	}
893
894	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
895		ddi_soft_state_free(nv_statep, inst);
896	}
897
898	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
899		pci_config_teardown(&pci_conf_handle);
900	}
901
902	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
903
904	return (DDI_FAILURE);
905}
906
907
908static int
909nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
910{
911	int i, port, inst = ddi_get_instance(dip);
912	nv_ctl_t *nvc;
913	nv_port_t *nvp;
914
915	nvc = ddi_get_soft_state(nv_statep, inst);
916
917	switch (cmd) {
918
919	case DDI_DETACH:
920
921		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
922
923		/*
924		 * Remove interrupts
925		 */
926		nv_rem_intrs(nvc);
927
928		/*
929		 * Remove timers
930		 */
931		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
932			nvp = &(nvc->nvc_port[port]);
933			if (nvp->nvp_timeout_id != 0) {
934				(void) untimeout(nvp->nvp_timeout_id);
935			}
936		}
937
938		/*
939		 * Remove maps
940		 */
941		for (i = 0; i < 6; i++) {
942			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
943		}
944
945		/*
946		 * Destroy mutexes
947		 */
948		mutex_destroy(&nvc->nvc_mutex);
949
950		/*
951		 * Uninitialize the controller structures
952		 */
953		nv_uninit_ctl(nvc);
954
955#ifdef SGPIO_SUPPORT
956		/*
957		 * release SGPIO resources
958		 */
959		nv_sgp_cleanup(nvc);
960#endif
961
962		/*
963		 * unregister from the sata module
964		 */
965		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
966
967		/*
968		 * Free soft state
969		 */
970		ddi_soft_state_free(nv_statep, inst);
971
972		return (DDI_SUCCESS);
973
974	case DDI_SUSPEND:
975
976		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
977
978		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
979			nv_suspend(&(nvc->nvc_port[i]));
980		}
981
982		nvc->nvc_state |= NV_CTRL_SUSPEND;
983
984		return (DDI_SUCCESS);
985
986	default:
987		return (DDI_FAILURE);
988	}
989}
990
991
992/*ARGSUSED*/
993static int
994nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
995{
996	nv_ctl_t *nvc;
997	int instance;
998	dev_t dev;
999
1000	dev = (dev_t)arg;
1001	instance = getminor(dev);
1002
1003	switch (infocmd) {
1004	case DDI_INFO_DEVT2DEVINFO:
1005		nvc = ddi_get_soft_state(nv_statep,  instance);
1006		if (nvc != NULL) {
1007			*result = nvc->nvc_dip;
1008			return (DDI_SUCCESS);
1009		} else {
1010			*result = NULL;
1011			return (DDI_FAILURE);
1012		}
1013	case DDI_INFO_DEVT2INSTANCE:
1014		*(int *)result = instance;
1015		break;
1016	default:
1017		break;
1018	}
1019	return (DDI_SUCCESS);
1020}
1021
1022
1023#ifdef SGPIO_SUPPORT
1024/* ARGSUSED */
1025static int
1026nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1027{
1028	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1029
1030	if (nvc == NULL) {
1031		return (ENXIO);
1032	}
1033
1034	return (0);
1035}
1036
1037
1038/* ARGSUSED */
1039static int
1040nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1041{
1042	return (0);
1043}
1044
1045
1046/* ARGSUSED */
1047static int
1048nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1049{
1050	nv_ctl_t *nvc;
1051	int inst;
1052	int status;
1053	int ctlr, port;
1054	int drive;
1055	uint8_t curr_led;
1056	struct dc_led_ctl led;
1057
1058	inst = getminor(dev);
1059	if (inst == -1) {
1060		return (EBADF);
1061	}
1062
1063	nvc = ddi_get_soft_state(nv_statep, inst);
1064	if (nvc == NULL) {
1065		return (EBADF);
1066	}
1067
1068	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1069		return (EIO);
1070	}
1071
1072	switch (cmd) {
1073	case DEVCTL_SET_LED:
1074		status = ddi_copyin((void *)arg, &led,
1075		    sizeof (struct dc_led_ctl), mode);
1076		if (status != 0)
1077			return (EFAULT);
1078
1079		/*
1080		 * Since only the first two controller currently support
1081		 * SGPIO (as per NVIDIA docs), this code will as well.
1082		 * Note that this validate the port value within led_state
1083		 * as well.
1084		 */
1085
1086		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1087		if ((ctlr != 0) && (ctlr != 1))
1088			return (ENXIO);
1089
1090		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1091		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1092			return (EINVAL);
1093		}
1094
1095		drive = led.led_number;
1096
1097		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1098		    (led.led_state == DCL_STATE_OFF)) {
1099
1100			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1101				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1102			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1103				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1104			} else {
1105				return (ENXIO);
1106			}
1107
1108			port = SGP_DRV_TO_PORT(led.led_number);
1109			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1110		}
1111
1112		if (led.led_ctl_active == DCL_CNTRL_ON) {
1113			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1114				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1115			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1116				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1117			} else {
1118				return (ENXIO);
1119			}
1120
1121			port = SGP_DRV_TO_PORT(led.led_number);
1122			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1123		}
1124
1125		break;
1126
1127	case DEVCTL_GET_LED:
1128		status = ddi_copyin((void *)arg, &led,
1129		    sizeof (struct dc_led_ctl), mode);
1130		if (status != 0)
1131			return (EFAULT);
1132
1133		/*
1134		 * Since only the first two controller currently support
1135		 * SGPIO (as per NVIDIA docs), this code will as well.
1136		 * Note that this validate the port value within led_state
1137		 * as well.
1138		 */
1139
1140		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1141		if ((ctlr != 0) && (ctlr != 1))
1142			return (ENXIO);
1143
1144		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1145		    led.led_number);
1146
1147		port = SGP_DRV_TO_PORT(led.led_number);
1148		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1149			led.led_ctl_active = DCL_CNTRL_ON;
1150
1151			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1152				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1153					led.led_state = DCL_STATE_OFF;
1154				else
1155					led.led_state = DCL_STATE_ON;
1156			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1157				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1158					led.led_state = DCL_STATE_OFF;
1159				else
1160					led.led_state = DCL_STATE_ON;
1161			} else {
1162				return (ENXIO);
1163			}
1164		} else {
1165			led.led_ctl_active = DCL_CNTRL_OFF;
1166			/*
1167			 * Not really off, but never set and no constant for
1168			 * tri-state
1169			 */
1170			led.led_state = DCL_STATE_OFF;
1171		}
1172
1173		status = ddi_copyout(&led, (void *)arg,
1174		    sizeof (struct dc_led_ctl), mode);
1175		if (status != 0)
1176			return (EFAULT);
1177
1178		break;
1179
1180	case DEVCTL_NUM_LEDS:
1181		led.led_number = SGPIO_DRV_CNT_VALUE;
1182		led.led_ctl_active = 1;
1183		led.led_type = 3;
1184
1185		/*
1186		 * According to documentation, NVIDIA SGPIO is supposed to
1187		 * support blinking, but it does not seem to work in practice.
1188		 */
1189		led.led_state = DCL_STATE_ON;
1190
1191		status = ddi_copyout(&led, (void *)arg,
1192		    sizeof (struct dc_led_ctl), mode);
1193		if (status != 0)
1194			return (EFAULT);
1195
1196		break;
1197
1198	default:
1199		return (EINVAL);
1200	}
1201
1202	return (0);
1203}
1204#endif	/* SGPIO_SUPPORT */
1205
1206
1207/*
1208 * Called by sata module to probe a port.  Port and device state
1209 * are not changed here... only reported back to the sata module.
1210 *
1211 */
1212static int
1213nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1214{
1215	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1216	uint8_t cport = sd->satadev_addr.cport;
1217	uint8_t pmport = sd->satadev_addr.pmport;
1218	uint8_t qual = sd->satadev_addr.qual;
1219	nv_port_t *nvp;
1220
1221	if (cport >= NV_MAX_PORTS(nvc)) {
1222		sd->satadev_type = SATA_DTYPE_NONE;
1223		sd->satadev_state = SATA_STATE_UNKNOWN;
1224
1225		return (SATA_FAILURE);
1226	}
1227
1228	ASSERT(nvc->nvc_port != NULL);
1229	nvp = &(nvc->nvc_port[cport]);
1230	ASSERT(nvp != NULL);
1231
1232	NVLOG((NVDBG_RESET, nvc, nvp,
1233	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1234	    "qual: 0x%x", cport, pmport, qual));
1235
1236	mutex_enter(&nvp->nvp_mutex);
1237
1238	/*
1239	 * This check seems to be done in the SATA module.
1240	 * It may not be required here
1241	 */
1242	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1243		nv_cmn_err(CE_WARN, nvc, nvp,
1244		    "port inactive.  Use cfgadm to activate");
1245		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1246		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1247		mutex_exit(&nvp->nvp_mutex);
1248
1249		return (SATA_SUCCESS);
1250	}
1251
1252	if (nvp->nvp_state & NV_PORT_FAILED) {
1253		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
1254		    "probe: port failed"));
1255		sd->satadev_type = SATA_DTYPE_NONE;
1256		sd->satadev_state = SATA_PSTATE_FAILED;
1257		mutex_exit(&nvp->nvp_mutex);
1258
1259		return (SATA_SUCCESS);
1260	}
1261
1262	if (qual == SATA_ADDR_PMPORT) {
1263		sd->satadev_type = SATA_DTYPE_NONE;
1264		sd->satadev_state = SATA_STATE_UNKNOWN;
1265		mutex_exit(&nvp->nvp_mutex);
1266		nv_cmn_err(CE_WARN, nvc, nvp,
1267		    "controller does not support port multiplier");
1268
1269		return (SATA_SUCCESS);
1270	}
1271
1272	sd->satadev_state = SATA_PSTATE_PWRON;
1273
1274	nv_copy_registers(nvp, sd, NULL);
1275
1276	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1277		/*
1278		 * We are waiting for reset to complete and to fetch
1279		 * a signature.
1280		 * Reset will cause the link to go down for a short period of
1281		 * time.  If reset processing continues for less than
1282		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1283		 * we will not report intermittent link down.
1284		 * Maybe we should report previous link state?
1285		 */
1286		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1287		    NV_LINK_DOWN_TIMEOUT) {
1288			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1289			    SSTATUS_IPM_ACTIVE);
1290			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1291			    SSTATUS_DET_DEVPRE_PHYCOM);
1292			sd->satadev_type = nvp->nvp_type;
1293			mutex_exit(&nvp->nvp_mutex);
1294
1295			return (SATA_SUCCESS);
1296		}
1297	}
1298	/*
1299	 * Just report the current port state
1300	 */
1301	sd->satadev_type = nvp->nvp_type;
1302	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1303	mutex_exit(&nvp->nvp_mutex);
1304
1305#ifdef SGPIO_SUPPORT
1306	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
1307		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1308		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1309	} else {
1310		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1311		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1312	}
1313#endif
1314
1315	return (SATA_SUCCESS);
1316}
1317
1318
1319/*
1320 * Called by sata module to start a new command.
1321 */
1322static int
1323nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1324{
1325	int cport = spkt->satapkt_device.satadev_addr.cport;
1326	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1327	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1328	int ret;
1329
1330	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1331	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1332
1333	mutex_enter(&nvp->nvp_mutex);
1334
1335	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1336		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1337		NVLOG((NVDBG_ERRS, nvc, nvp,
1338		    "nv_sata_start: port not yet initialized"));
1339		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1340		mutex_exit(&nvp->nvp_mutex);
1341
1342		return (SATA_TRAN_PORT_ERROR);
1343	}
1344
1345	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1346		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1347		NVLOG((NVDBG_ERRS, nvc, nvp,
1348		    "nv_sata_start: NV_PORT_INACTIVE"));
1349		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1350		mutex_exit(&nvp->nvp_mutex);
1351
1352		return (SATA_TRAN_PORT_ERROR);
1353	}
1354
1355	if (nvp->nvp_state & NV_PORT_FAILED) {
1356		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1357		NVLOG((NVDBG_ERRS, nvc, nvp,
1358		    "nv_sata_start: NV_PORT_FAILED state"));
1359		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1360		mutex_exit(&nvp->nvp_mutex);
1361
1362		return (SATA_TRAN_PORT_ERROR);
1363	}
1364
1365	if (nvp->nvp_state & NV_PORT_RESET) {
1366		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1367		    "still waiting for reset completion"));
1368		spkt->satapkt_reason = SATA_PKT_BUSY;
1369		mutex_exit(&nvp->nvp_mutex);
1370
1371		/*
1372		 * If in panic, timeouts do not occur, so fake one
1373		 * so that the signature can be acquired to complete
1374		 * the reset handling.
1375		 */
1376		if (ddi_in_panic()) {
1377			nv_timeout(nvp);
1378		}
1379
1380		return (SATA_TRAN_BUSY);
1381	}
1382
1383	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1384		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1385		NVLOG((NVDBG_ERRS, nvc, nvp,
1386		    "nv_sata_start: SATA_DTYPE_NONE"));
1387		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1388		mutex_exit(&nvp->nvp_mutex);
1389
1390		return (SATA_TRAN_PORT_ERROR);
1391	}
1392
1393	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1394		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1395		nv_cmn_err(CE_WARN, nvc, nvp,
1396		    "port multipliers not supported by controller");
1397		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1398		mutex_exit(&nvp->nvp_mutex);
1399
1400		return (SATA_TRAN_CMD_UNSUPPORTED);
1401	}
1402
1403	/*
1404	 * after a device reset, and then when sata module restore processing
1405	 * is complete, the sata module will set sata_clear_dev_reset which
1406	 * indicates that restore processing has completed and normal
1407	 * non-restore related commands should be processed.
1408	 */
1409	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1410		nvp->nvp_state &= ~NV_PORT_RESTORE;
1411		NVLOG((NVDBG_RESET, nvc, nvp,
1412		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1413	}
1414
1415	/*
1416	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1417	 * only allow commands which restore device state.  The sata module
1418	 * marks such commands with with sata_ignore_dev_reset.
1419	 *
1420	 * during coredump, nv_reset is called and but then the restore
1421	 * doesn't happen.  For now, workaround by ignoring the wait for
1422	 * restore if the system is panicing.
1423	 */
1424	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1425	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1426	    (ddi_in_panic() == 0)) {
1427		spkt->satapkt_reason = SATA_PKT_BUSY;
1428		NVLOG((NVDBG_VERBOSE, nvc, nvp,
1429		    "nv_sata_start: waiting for restore "));
1430		mutex_exit(&nvp->nvp_mutex);
1431
1432		return (SATA_TRAN_BUSY);
1433	}
1434
1435	if (nvp->nvp_state & NV_PORT_ABORTING) {
1436		spkt->satapkt_reason = SATA_PKT_BUSY;
1437		NVLOG((NVDBG_ERRS, nvc, nvp,
1438		    "nv_sata_start: NV_PORT_ABORTING"));
1439		mutex_exit(&nvp->nvp_mutex);
1440
1441		return (SATA_TRAN_BUSY);
1442	}
1443
1444	/* Clear SError to be able to check errors after the command failure */
1445	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1446
1447	if (spkt->satapkt_op_mode &
1448	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1449
1450		ret = nv_start_sync(nvp, spkt);
1451
1452		mutex_exit(&nvp->nvp_mutex);
1453
1454		return (ret);
1455	}
1456
1457	/*
1458	 * start command asynchronous command
1459	 */
1460	ret = nv_start_async(nvp, spkt);
1461
1462	mutex_exit(&nvp->nvp_mutex);
1463
1464	return (ret);
1465}
1466
1467
1468/*
1469 * SATA_OPMODE_POLLING implies the driver is in a
1470 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1471 * If only SATA_OPMODE_SYNCH is set, the driver can use
1472 * interrupts and sleep wait on a cv.
1473 *
1474 * If SATA_OPMODE_POLLING is set, the driver can't use
1475 * interrupts and must busy wait and simulate the
1476 * interrupts by waiting for BSY to be cleared.
1477 *
1478 * Synchronous mode has to return BUSY if there are
1479 * any other commands already on the drive.
1480 */
1481static int
1482nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1483{
1484	nv_ctl_t *nvc = nvp->nvp_ctlp;
1485	int ret;
1486
1487	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1488
1489	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1490		spkt->satapkt_reason = SATA_PKT_BUSY;
1491		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1492		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1493		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1494		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1495		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1496
1497		return (SATA_TRAN_BUSY);
1498	}
1499
1500	/*
1501	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1502	 */
1503	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1504	    servicing_interrupt()) {
1505		spkt->satapkt_reason = SATA_PKT_BUSY;
1506		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1507		    "SYNC mode not allowed during interrupt"));
1508
1509		return (SATA_TRAN_BUSY);
1510
1511	}
1512
1513	/*
1514	 * disable interrupt generation if in polled mode
1515	 */
1516	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1517		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1518	}
1519
1520	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1521		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1522			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1523		}
1524
1525		return (ret);
1526	}
1527
1528	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1529		mutex_exit(&nvp->nvp_mutex);
1530		ret = nv_poll_wait(nvp, spkt);
1531		mutex_enter(&nvp->nvp_mutex);
1532
1533		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1534
1535		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1536		    " done % reason %d", ret));
1537
1538		return (ret);
1539	}
1540
1541	/*
1542	 * non-polling synchronous mode handling.  The interrupt will signal
1543	 * when the IO is completed.
1544	 */
1545	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1546
1547	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1548
1549		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1550	}
1551
1552	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1553	    " done % reason %d", spkt->satapkt_reason));
1554
1555	return (SATA_TRAN_ACCEPTED);
1556}
1557
1558
1559static int
1560nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1561{
1562	int ret;
1563	nv_ctl_t *nvc = nvp->nvp_ctlp;
1564#if ! defined(__lock_lint)
1565	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1566#endif
1567
1568	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1569
1570	for (;;) {
1571
1572		NV_DELAY_NSEC(400);
1573
1574		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1575		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1576		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1577			mutex_enter(&nvp->nvp_mutex);
1578			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1579			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1580			nvp->nvp_state |= NV_PORT_RESET;
1581			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1582			    NV_PORT_RESET_RETRY);
1583			nv_reset(nvp);
1584			nv_complete_io(nvp, spkt, 0);
1585			mutex_exit(&nvp->nvp_mutex);
1586			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1587			    "SATA_STATUS_BSY"));
1588
1589			return (SATA_TRAN_ACCEPTED);
1590		}
1591
1592		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1593
1594		/*
1595		 * Simulate interrupt.
1596		 */
1597		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1598		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1599
1600		if (ret != DDI_INTR_CLAIMED) {
1601			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1602			    " unclaimed -- resetting"));
1603			mutex_enter(&nvp->nvp_mutex);
1604			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1605			nvp->nvp_state |= NV_PORT_RESET;
1606			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1607			    NV_PORT_RESET_RETRY);
1608			nv_reset(nvp);
1609			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1610			nv_complete_io(nvp, spkt, 0);
1611			mutex_exit(&nvp->nvp_mutex);
1612
1613			return (SATA_TRAN_ACCEPTED);
1614		}
1615
1616#if ! defined(__lock_lint)
1617		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1618			/*
1619			 * packet is complete
1620			 */
1621			return (SATA_TRAN_ACCEPTED);
1622		}
1623#endif
1624	}
1625	/*NOTREACHED*/
1626}
1627
1628
1629/*
1630 * Called by sata module to abort outstanding packets.
1631 */
1632/*ARGSUSED*/
1633static int
1634nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1635{
1636	int cport = spkt->satapkt_device.satadev_addr.cport;
1637	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1638	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1639	int c_a, ret;
1640
1641	ASSERT(cport < NV_MAX_PORTS(nvc));
1642	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1643
1644	mutex_enter(&nvp->nvp_mutex);
1645
1646	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1647		mutex_exit(&nvp->nvp_mutex);
1648		nv_cmn_err(CE_WARN, nvc, nvp,
1649		    "abort request failed: port inactive");
1650
1651		return (SATA_FAILURE);
1652	}
1653
1654	/*
1655	 * spkt == NULL then abort all commands
1656	 */
1657	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1658
1659	if (c_a) {
1660		NVLOG((NVDBG_ENTRY, nvc, nvp,
1661		    "packets aborted running=%d", c_a));
1662		ret = SATA_SUCCESS;
1663	} else {
1664		if (spkt == NULL) {
1665			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1666		} else {
1667			NVLOG((NVDBG_ENTRY, nvc, nvp,
1668			    "can't find spkt to abort"));
1669		}
1670		ret = SATA_FAILURE;
1671	}
1672
1673	mutex_exit(&nvp->nvp_mutex);
1674
1675	return (ret);
1676}
1677
1678
1679/*
1680 * if spkt == NULL abort all pkts running, otherwise
1681 * abort the requested packet.  must be called with nv_mutex
1682 * held and returns with it held.  Not NCQ aware.
1683 */
1684static int
1685nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1686{
1687	int aborted = 0, i, reset_once = B_FALSE;
1688	struct nv_slot *nv_slotp;
1689	sata_pkt_t *spkt_slot;
1690
1691	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1692
1693	/*
1694	 * return if the port is not configured
1695	 */
1696	if (nvp->nvp_slot == NULL) {
1697		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1698		    "nv_abort_active: not configured so returning"));
1699
1700		return (0);
1701	}
1702
1703	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1704
1705	nvp->nvp_state |= NV_PORT_ABORTING;
1706
1707	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1708
1709		nv_slotp = &(nvp->nvp_slot[i]);
1710		spkt_slot = nv_slotp->nvslot_spkt;
1711
1712		/*
1713		 * skip if not active command in slot
1714		 */
1715		if (spkt_slot == NULL) {
1716			continue;
1717		}
1718
1719		/*
1720		 * if a specific packet was requested, skip if
1721		 * this is not a match
1722		 */
1723		if ((spkt != NULL) && (spkt != spkt_slot)) {
1724			continue;
1725		}
1726
1727		/*
1728		 * stop the hardware.  This could need reworking
1729		 * when NCQ is enabled in the driver.
1730		 */
1731		if (reset_once == B_FALSE) {
1732			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1733
1734			/*
1735			 * stop DMA engine
1736			 */
1737			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1738
1739			/*
1740			 * Reset only if explicitly specified by the arg flag
1741			 */
1742			if (flag == B_TRUE) {
1743				reset_once = B_TRUE;
1744				nvp->nvp_state |= NV_PORT_RESET;
1745				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1746				    NV_PORT_RESET_RETRY);
1747				nv_reset(nvp);
1748			}
1749		}
1750
1751		spkt_slot->satapkt_reason = abort_reason;
1752		nv_complete_io(nvp, spkt_slot, i);
1753		aborted++;
1754	}
1755
1756	nvp->nvp_state &= ~NV_PORT_ABORTING;
1757
1758	return (aborted);
1759}
1760
1761
1762/*
1763 * Called by sata module to reset a port, device, or the controller.
1764 */
1765static int
1766nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1767{
1768	int cport = sd->satadev_addr.cport;
1769	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1770	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1771	int ret = SATA_SUCCESS;
1772
1773	ASSERT(cport < NV_MAX_PORTS(nvc));
1774
1775	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1776
1777	mutex_enter(&nvp->nvp_mutex);
1778
1779	switch (sd->satadev_addr.qual) {
1780
1781	case SATA_ADDR_CPORT:
1782		/*FALLTHROUGH*/
1783	case SATA_ADDR_DCPORT:
1784		nvp->nvp_state |= NV_PORT_RESET;
1785		nvp->nvp_state &= ~NV_PORT_RESTORE;
1786		nv_reset(nvp);
1787		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1788
1789		break;
1790	case SATA_ADDR_CNTRL:
1791		NVLOG((NVDBG_ENTRY, nvc, nvp,
1792		    "nv_sata_reset: constroller reset not supported"));
1793
1794		break;
1795	case SATA_ADDR_PMPORT:
1796	case SATA_ADDR_DPMPORT:
1797		NVLOG((NVDBG_ENTRY, nvc, nvp,
1798		    "nv_sata_reset: port multipliers not supported"));
1799		/*FALLTHROUGH*/
1800	default:
1801		/*
1802		 * unsupported case
1803		 */
1804		ret = SATA_FAILURE;
1805		break;
1806	}
1807
1808	if (ret == SATA_SUCCESS) {
1809		/*
1810		 * If the port is inactive, do a quiet reset and don't attempt
1811		 * to wait for reset completion or do any post reset processing
1812		 */
1813		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1814			nvp->nvp_state &= ~NV_PORT_RESET;
1815			nvp->nvp_reset_time = 0;
1816		}
1817
1818		/*
1819		 * clear the port failed flag
1820		 */
1821		nvp->nvp_state &= ~NV_PORT_FAILED;
1822	}
1823
1824	mutex_exit(&nvp->nvp_mutex);
1825
1826	return (ret);
1827}
1828
1829
1830/*
1831 * Sata entry point to handle port activation.  cfgadm -c connect
1832 */
1833static int
1834nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1835{
1836	int cport = sd->satadev_addr.cport;
1837	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1838	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1839
1840	ASSERT(cport < NV_MAX_PORTS(nvc));
1841	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1842
1843	mutex_enter(&nvp->nvp_mutex);
1844
1845	sd->satadev_state = SATA_STATE_READY;
1846
1847	nv_copy_registers(nvp, sd, NULL);
1848
1849	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1850
1851	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1852	/* Initiate link probing and device signature acquisition */
1853	nvp->nvp_type = SATA_DTYPE_NONE;
1854	nvp->nvp_signature = 0;
1855	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1856	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1857	nv_reset(nvp);
1858
1859	mutex_exit(&nvp->nvp_mutex);
1860
1861	return (SATA_SUCCESS);
1862}
1863
1864
1865/*
1866 * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1867 */
1868static int
1869nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1870{
1871	int cport = sd->satadev_addr.cport;
1872	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1873	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1874
1875	ASSERT(cport < NV_MAX_PORTS(nvc));
1876	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1877
1878	mutex_enter(&nvp->nvp_mutex);
1879
1880	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1881
1882	/*
1883	 * make the device inaccessible
1884	 */
1885	nvp->nvp_state |= NV_PORT_INACTIVE;
1886
1887	/*
1888	 * disable the interrupts on port
1889	 */
1890	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1891
1892	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1893	nv_copy_registers(nvp, sd, NULL);
1894
1895	mutex_exit(&nvp->nvp_mutex);
1896
1897	return (SATA_SUCCESS);
1898}
1899
1900
1901/*
1902 * find an empty slot in the driver's queue, increment counters,
1903 * and then invoke the appropriate PIO or DMA start routine.
1904 */
1905static int
1906nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1907{
1908	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1909	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1910	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1911	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1912	nv_ctl_t *nvc = nvp->nvp_ctlp;
1913	nv_slot_t *nv_slotp;
1914	boolean_t dma_cmd;
1915
1916	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1917	    sata_cmdp->satacmd_cmd_reg));
1918
1919	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1920	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1921		nvp->nvp_ncq_run++;
1922		/*
1923		 * search for an empty NCQ slot.  by the time, it's already
1924		 * been determined by the caller that there is room on the
1925		 * queue.
1926		 */
1927		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1928		    on_bit <<= 1) {
1929			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1930				break;
1931			}
1932		}
1933
1934		/*
1935		 * the first empty slot found, should not exceed the queue
1936		 * depth of the drive.  if it does it's an error.
1937		 */
1938		ASSERT(slot != nvp->nvp_queue_depth);
1939
1940		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1941		    nvp->nvp_sactive);
1942		ASSERT((sactive & on_bit) == 0);
1943		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1944		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1945		    on_bit));
1946		nvp->nvp_sactive_cache |= on_bit;
1947
1948		ncq = NVSLOT_NCQ;
1949
1950	} else {
1951		nvp->nvp_non_ncq_run++;
1952		slot = 0;
1953	}
1954
1955	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1956
1957	ASSERT(nv_slotp->nvslot_spkt == NULL);
1958
1959	nv_slotp->nvslot_spkt = spkt;
1960	nv_slotp->nvslot_flags = ncq;
1961
1962	/*
1963	 * the sata module doesn't indicate which commands utilize the
1964	 * DMA engine, so find out using this switch table.
1965	 */
1966	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1967	case SATAC_READ_DMA_EXT:
1968	case SATAC_WRITE_DMA_EXT:
1969	case SATAC_WRITE_DMA:
1970	case SATAC_READ_DMA:
1971	case SATAC_READ_DMA_QUEUED:
1972	case SATAC_READ_DMA_QUEUED_EXT:
1973	case SATAC_WRITE_DMA_QUEUED:
1974	case SATAC_WRITE_DMA_QUEUED_EXT:
1975	case SATAC_READ_FPDMA_QUEUED:
1976	case SATAC_WRITE_FPDMA_QUEUED:
1977		dma_cmd = B_TRUE;
1978		break;
1979	default:
1980		dma_cmd = B_FALSE;
1981	}
1982
1983	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1984		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1985		nv_slotp->nvslot_start = nv_start_dma;
1986		nv_slotp->nvslot_intr = nv_intr_dma;
1987	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1988		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1989		nv_slotp->nvslot_start = nv_start_pkt_pio;
1990		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1991		if ((direction == SATA_DIR_READ) ||
1992		    (direction == SATA_DIR_WRITE)) {
1993			nv_slotp->nvslot_byte_count =
1994			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1995			nv_slotp->nvslot_v_addr =
1996			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1997			/*
1998			 * Freeing DMA resources allocated by the framework
1999			 * now to avoid buffer overwrite (dma sync) problems
2000			 * when the buffer is released at command completion.
2001			 * Primarily an issue on systems with more than
2002			 * 4GB of memory.
2003			 */
2004			sata_free_dma_resources(spkt);
2005		}
2006	} else if (direction == SATA_DIR_NODATA_XFER) {
2007		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
2008		nv_slotp->nvslot_start = nv_start_nodata;
2009		nv_slotp->nvslot_intr = nv_intr_nodata;
2010	} else if (direction == SATA_DIR_READ) {
2011		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
2012		nv_slotp->nvslot_start = nv_start_pio_in;
2013		nv_slotp->nvslot_intr = nv_intr_pio_in;
2014		nv_slotp->nvslot_byte_count =
2015		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2016		nv_slotp->nvslot_v_addr =
2017		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2018		/*
2019		 * Freeing DMA resources allocated by the framework now to
2020		 * avoid buffer overwrite (dma sync) problems when the buffer
2021		 * is released at command completion.  This is not an issue
2022		 * for write because write does not update the buffer.
2023		 * Primarily an issue on systems with more than 4GB of memory.
2024		 */
2025		sata_free_dma_resources(spkt);
2026	} else if (direction == SATA_DIR_WRITE) {
2027		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
2028		nv_slotp->nvslot_start = nv_start_pio_out;
2029		nv_slotp->nvslot_intr = nv_intr_pio_out;
2030		nv_slotp->nvslot_byte_count =
2031		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2032		nv_slotp->nvslot_v_addr =
2033		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2034	} else {
2035		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2036		    " %d cookies %d cmd %x",
2037		    sata_cmdp->satacmd_flags.sata_data_direction,
2038		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2039		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2040		ret = SATA_TRAN_CMD_UNSUPPORTED;
2041
2042		goto fail;
2043	}
2044
2045	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2046	    SATA_TRAN_ACCEPTED) {
2047#ifdef SGPIO_SUPPORT
2048		nv_sgp_drive_active(nvp->nvp_ctlp,
2049		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2050#endif
2051		nv_slotp->nvslot_stime = ddi_get_lbolt();
2052
2053		/*
2054		 * start timer if it's not already running and this packet
2055		 * is not requesting polled mode.
2056		 */
2057		if ((nvp->nvp_timeout_id == 0) &&
2058		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2059			nv_setup_timeout(nvp, NV_ONE_SEC);
2060		}
2061
2062		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2063		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2064
2065		return (SATA_TRAN_ACCEPTED);
2066	}
2067
2068	fail:
2069
2070	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2071
2072	if (ncq == NVSLOT_NCQ) {
2073		nvp->nvp_ncq_run--;
2074		nvp->nvp_sactive_cache &= ~on_bit;
2075	} else {
2076		nvp->nvp_non_ncq_run--;
2077	}
2078	nv_slotp->nvslot_spkt = NULL;
2079	nv_slotp->nvslot_flags = 0;
2080
2081	return (ret);
2082}
2083
2084
2085/*
2086 * Check if the signature is ready and if non-zero translate
2087 * it into a solaris sata defined type.
2088 */
2089static void
2090nv_read_signature(nv_port_t *nvp)
2091{
2092	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2093
2094	/*
2095	 * Task file error register bit 0 set to 1 indicate that drive
2096	 * is ready and have sent D2H FIS with a signature.
2097	 */
2098	if (nv_check_tfr_error != 0) {
2099		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2100		if (!(tfr_error & SATA_ERROR_ILI)) {
2101			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
2102			    "nv_read_signature: signature not ready"));
2103			return;
2104		}
2105	}
2106
2107	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2108	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2109	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2110	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2111
2112	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
2113	    "nv_read_signature: 0x%x ", nvp->nvp_signature));
2114
2115	switch (nvp->nvp_signature) {
2116
2117	case NV_SIG_DISK:
2118		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
2119		nvp->nvp_type = SATA_DTYPE_ATADISK;
2120		break;
2121	case NV_SIG_ATAPI:
2122		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2123		    "drive is an optical device"));
2124		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2125		break;
2126	case NV_SIG_PM:
2127		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2128		    "device is a port multiplier"));
2129		nvp->nvp_type = SATA_DTYPE_PMULT;
2130		break;
2131	case NV_SIG_NOTREADY:
2132		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2133		    "signature not ready"));
2134		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2135		break;
2136	default:
2137		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2138		    " recognized", nvp->nvp_signature);
2139		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2140		break;
2141	}
2142
2143	if (nvp->nvp_signature) {
2144		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2145	}
2146
2147#ifdef SGPIO_SUPPORT
2148	if (nvp->nvp_signature == NV_SIG_DISK) {
2149		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2150		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2151	} else {
2152		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2153		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2154	}
2155#endif
2156}
2157
2158
2159/*
2160 * Set up a new timeout or complete a timeout.
2161 * Timeout value has to be specified in microseconds. If time is zero, no new
2162 * timeout is scheduled.
2163 * Must be called at the end of the timeout routine.
2164 */
2165static void
2166nv_setup_timeout(nv_port_t *nvp, int time)
2167{
2168	clock_t old_duration = nvp->nvp_timeout_duration;
2169
2170	ASSERT(time != 0);
2171
2172	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2173		/*
2174		 * Since we are dropping the mutex for untimeout,
2175		 * the timeout may be executed while we are trying to
2176		 * untimeout and setting up a new timeout.
2177		 * If nvp_timeout_duration is 0, then this function
2178		 * was re-entered. Just exit.
2179		 */
2180	cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2181		return;
2182	}
2183	nvp->nvp_timeout_duration = 0;
2184	if (nvp->nvp_timeout_id == 0) {
2185		/* Start new timer */
2186		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2187		    drv_usectohz(time));
2188	} else {
2189		/*
2190		 * If the currently running timeout is due later than the
2191		 * requested one, restart it with a new expiration.
2192		 * Our timeouts do not need to be accurate - we would be just
2193		 * checking that the specified time was exceeded.
2194		 */
2195		if (old_duration > time) {
2196			mutex_exit(&nvp->nvp_mutex);
2197			(void) untimeout(nvp->nvp_timeout_id);
2198			mutex_enter(&nvp->nvp_mutex);
2199			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2200			    drv_usectohz(time));
2201		}
2202	}
2203	nvp->nvp_timeout_duration = time;
2204}
2205
2206
2207
2208int nv_reset_length = NV_RESET_LENGTH;
2209
2210/*
2211 * Reset the port
2212 *
2213 * Entered with nvp mutex held
2214 */
2215static void
2216nv_reset(nv_port_t *nvp)
2217{
2218	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2219	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2220	nv_ctl_t *nvc = nvp->nvp_ctlp;
2221	uint32_t sctrl, serr, sstatus;
2222	uint8_t bmicx;
2223	int i, j, reset = 0;
2224
2225	ASSERT(mutex_owned(&nvp->nvp_mutex));
2226
2227	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset()"));
2228	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2229	NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: serr 0x%x", serr));
2230
2231	/*
2232	 * stop DMA engine.
2233	 */
2234	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2235	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2236
2237	nvp->nvp_state |= NV_PORT_RESET;
2238	nvp->nvp_reset_time = ddi_get_lbolt();
2239
2240	/*
2241	 * Issue hardware reset; retry if necessary.
2242	 */
2243	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2244		/*
2245		 * Clear signature registers
2246		 */
2247		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2248		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2249		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2250		nv_put8(cmdhdl, nvp->nvp_count, 0);
2251
2252		/* Clear task file error register */
2253		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2254
2255		/*
2256		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2257		 */
2258		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2259		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2260		    sctrl | SCONTROL_DET_COMRESET);
2261
2262		/* Wait at least 1ms, as required by the spec */
2263		drv_usecwait(nv_reset_length);
2264
2265		/* Reset all accumulated error bits */
2266		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2267
2268		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2269		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2270		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2271		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus));
2272
2273		/* de-assert reset in PHY */
2274		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2275		    sctrl & ~SCONTROL_DET_COMRESET);
2276
2277		/*
2278		 * Wait up to 10ms for COMINIT to arrive, indicating that
2279		 * the device recognized COMRESET.
2280		 */
2281		for (j = 0; j < 10; j++) {
2282			drv_usecwait(NV_ONE_MSEC);
2283			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2284			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2285			    (SSTATUS_GET_DET(sstatus) ==
2286			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2287				reset = 1;
2288				break;
2289			}
2290		}
2291		if (reset == 1)
2292			break;
2293	}
2294	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2295	if (reset == 0) {
2296		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2297		    "(serr 0x%x) after %d attempts", serr, i));
2298	} else {
2299		NVLOG((NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x)"
2300		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2301		    nvp->nvp_reset_time)));
2302	}
2303	nvp->nvp_reset_time = ddi_get_lbolt();
2304
2305	if (servicing_interrupt()) {
2306		nv_setup_timeout(nvp, NV_ONE_MSEC);
2307	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2308		nv_monitor_reset(nvp);
2309	}
2310}
2311
2312
2313/*
2314 * Initialize register handling specific to mcp51/mcp55
2315 */
2316/* ARGSUSED */
2317static void
2318mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2319{
2320	nv_port_t *nvp;
2321	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2322	uint8_t off, port;
2323
2324	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2325	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2326
2327	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2328		nvp = &(nvc->nvc_port[port]);
2329		nvp->nvp_mcp5x_int_status =
2330		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2331		nvp->nvp_mcp5x_int_ctl =
2332		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2333
2334		/*
2335		 * clear any previous interrupts asserted
2336		 */
2337		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2338		    MCP5X_INT_CLEAR);
2339
2340		/*
2341		 * These are the interrupts to accept for now.  The spec
2342		 * says these are enable bits, but nvidia has indicated
2343		 * these are masking bits.  Even though they may be masked
2344		 * out to prevent asserting the main interrupt, they can
2345		 * still be asserted while reading the interrupt status
2346		 * register, so that needs to be considered in the interrupt
2347		 * handler.
2348		 */
2349		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2350		    ~(MCP5X_INT_IGNORE));
2351	}
2352
2353	/*
2354	 * Allow the driver to program the BM on the first command instead
2355	 * of waiting for an interrupt.
2356	 */
2357#ifdef NCQ
2358	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2359	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2360	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2361	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2362#endif
2363
2364	/*
2365	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2366	 * Enable DMA to take advantage of that.
2367	 *
2368	 */
2369	if (nvc->nvc_revid >= 0xa3) {
2370		if (nv_sata_40bit_dma == B_TRUE) {
2371			uint32_t reg32;
2372			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2373			    "rev id is %X and"
2374			    " is capable of 40-bit DMA addressing",
2375			    nvc->nvc_revid));
2376			nvc->dma_40bit = B_TRUE;
2377			reg32 = pci_config_get32(pci_conf_handle,
2378			    NV_SATA_CFG_20);
2379			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2380			    reg32 | NV_40BIT_PRD);
2381		} else {
2382			NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2383			    "40-bit DMA disabled by nv_sata_40bit_dma"));
2384		}
2385	} else {
2386		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2387		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2388	}
2389}
2390
2391
2392/*
2393 * Initialize register handling specific to ck804
2394 */
2395static void
2396ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2397{
2398	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2399	uint32_t reg32;
2400	uint16_t reg16;
2401	nv_port_t *nvp;
2402	int j;
2403
2404	/*
2405	 * delay hotplug interrupts until PHYRDY.
2406	 */
2407	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2408	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2409	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2410
2411	/*
2412	 * enable hot plug interrupts for channel x and y
2413	 */
2414	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2415	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2416	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2417	    NV_HIRQ_EN | reg16);
2418
2419
2420	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2421	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2422	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2423	    NV_HIRQ_EN | reg16);
2424
2425	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2426
2427	/*
2428	 * clear any existing interrupt pending then enable
2429	 */
2430	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2431		nvp = &(nvc->nvc_port[j]);
2432		mutex_enter(&nvp->nvp_mutex);
2433		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2434		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2435		mutex_exit(&nvp->nvp_mutex);
2436	}
2437}
2438
2439
2440/*
2441 * Initialize the controller and set up driver data structures.
2442 * determine if ck804 or mcp5x class.
2443 */
2444static int
2445nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2446{
2447	struct sata_hba_tran stran;
2448	nv_port_t *nvp;
2449	int j, ck804;
2450	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2451	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2452	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2453	uint32_t reg32;
2454	uint8_t reg8, reg8_save;
2455
2456	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2457
2458	ck804 = B_TRUE;
2459#ifdef SGPIO_SUPPORT
2460	nvc->nvc_mcp5x_flag = B_FALSE;
2461#endif
2462
2463	/*
2464	 * Need to set bit 2 to 1 at config offset 0x50
2465	 * to enable access to the bar5 registers.
2466	 */
2467	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2468	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2469		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2470		    reg32 | NV_BAR5_SPACE_EN);
2471	}
2472
2473	/*
2474	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2475	 * task file registers into bar5 while mcp5x won't.  The offset of
2476	 * the task file registers in mcp5x's space is unused, so it will
2477	 * return zero.  So check one of the task file registers to see if it is
2478	 * writable and reads back what was written.  If it's mcp5x it will
2479	 * return back 0xff whereas ck804 will return the value written.
2480	 */
2481	reg8_save = nv_get8(bar5_hdl,
2482	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2483
2484
2485	for (j = 1; j < 3; j++) {
2486
2487		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2488		reg8 = nv_get8(bar5_hdl,
2489		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2490
2491		if (reg8 != j) {
2492			ck804 = B_FALSE;
2493			nvc->nvc_mcp5x_flag = B_TRUE;
2494			break;
2495		}
2496	}
2497
2498	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2499
2500	if (ck804 == B_TRUE) {
2501		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2502		nvc->nvc_interrupt = ck804_intr;
2503		nvc->nvc_reg_init = ck804_reg_init;
2504		nvc->nvc_set_intr = ck804_set_intr;
2505	} else {
2506		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
2507		nvc->nvc_interrupt = mcp5x_intr;
2508		nvc->nvc_reg_init = mcp5x_reg_init;
2509		nvc->nvc_set_intr = mcp5x_set_intr;
2510	}
2511
2512
2513	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2514	stran.sata_tran_hba_dip = nvc->nvc_dip;
2515	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2516	stran.sata_tran_hba_features_support =
2517	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2518	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2519	stran.sata_tran_probe_port = nv_sata_probe;
2520	stran.sata_tran_start = nv_sata_start;
2521	stran.sata_tran_abort = nv_sata_abort;
2522	stran.sata_tran_reset_dport = nv_sata_reset;
2523	stran.sata_tran_selftest = NULL;
2524	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2525	stran.sata_tran_pwrmgt_ops = NULL;
2526	stran.sata_tran_ioctl = NULL;
2527	nvc->nvc_sata_hba_tran = stran;
2528
2529	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2530	    KM_SLEEP);
2531
2532	/*
2533	 * initialize registers common to all chipsets
2534	 */
2535	nv_common_reg_init(nvc);
2536
2537	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2538		nvp = &(nvc->nvc_port[j]);
2539
2540		cmd_addr = nvp->nvp_cmd_addr;
2541		ctl_addr = nvp->nvp_ctl_addr;
2542		bm_addr = nvp->nvp_bm_addr;
2543
2544		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2545		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2546
2547		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2548
2549		nvp->nvp_data	= cmd_addr + NV_DATA;
2550		nvp->nvp_error	= cmd_addr + NV_ERROR;
2551		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2552		nvp->nvp_count	= cmd_addr + NV_COUNT;
2553		nvp->nvp_sect	= cmd_addr + NV_SECT;
2554		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2555		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2556		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2557		nvp->nvp_status	= cmd_addr + NV_STATUS;
2558		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2559		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2560		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2561
2562		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2563		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2564		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2565
2566		nvp->nvp_state = 0;
2567
2568		/*
2569		 * Initialize dma handles, etc.
2570		 * If it fails, the port is in inactive state.
2571		 */
2572		(void) nv_init_port(nvp);
2573	}
2574
2575	/*
2576	 * initialize register by calling chip specific reg initialization
2577	 */
2578	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2579
2580	/* initialize the hba dma attribute */
2581	if (nvc->dma_40bit == B_TRUE)
2582		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2583		    &buffer_dma_40bit_attr;
2584	else
2585		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2586		    &buffer_dma_attr;
2587
2588	return (NV_SUCCESS);
2589}
2590
2591
2592/*
2593 * Initialize data structures with enough slots to handle queuing, if
2594 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2595 * NCQ support is built into the driver and enabled.  It might have been
2596 * better to derive the true size from the drive itself, but the sata
2597 * module only sends down that information on the first NCQ command,
2598 * which means possibly re-sizing the structures on an interrupt stack,
2599 * making error handling more messy.  The easy way is to just allocate
2600 * all 32 slots, which is what most drives support anyway.
2601 */
2602static int
2603nv_init_port(nv_port_t *nvp)
2604{
2605	nv_ctl_t *nvc = nvp->nvp_ctlp;
2606	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2607	dev_info_t *dip = nvc->nvc_dip;
2608	ddi_device_acc_attr_t dev_attr;
2609	size_t buf_size;
2610	ddi_dma_cookie_t cookie;
2611	uint_t count;
2612	int rc, i;
2613
2614	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2615	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2616	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2617
2618	if (nvp->nvp_state & NV_PORT_INIT) {
2619		NVLOG((NVDBG_INIT, nvc, nvp,
2620		    "nv_init_port previously initialized"));
2621
2622		return (NV_SUCCESS);
2623	} else {
2624		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2625	}
2626
2627	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2628	    NV_QUEUE_SLOTS, KM_SLEEP);
2629
2630	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2631	    NV_QUEUE_SLOTS, KM_SLEEP);
2632
2633	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2634	    NV_QUEUE_SLOTS, KM_SLEEP);
2635
2636	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2637	    NV_QUEUE_SLOTS, KM_SLEEP);
2638
2639	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2640	    KM_SLEEP);
2641
2642	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2643
2644		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2645		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2646
2647		if (rc != DDI_SUCCESS) {
2648			nv_uninit_port(nvp);
2649
2650			return (NV_FAILURE);
2651		}
2652
2653		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2654		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2655		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2656		    &(nvp->nvp_sg_acc_hdl[i]));
2657
2658		if (rc != DDI_SUCCESS) {
2659			nv_uninit_port(nvp);
2660
2661			return (NV_FAILURE);
2662		}
2663
2664		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2665		    nvp->nvp_sg_addr[i], buf_size,
2666		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2667		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2668
2669		if (rc != DDI_DMA_MAPPED) {
2670			nv_uninit_port(nvp);
2671
2672			return (NV_FAILURE);
2673		}
2674
2675		ASSERT(count == 1);
2676		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2677
2678		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2679
2680		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2681	}
2682
2683	/*
2684	 * nvp_queue_depth represents the actual drive queue depth, not the
2685	 * number of slots allocated in the structures (which may be more).
2686	 * Actual queue depth is only learned after the first NCQ command, so
2687	 * initialize it to 1 for now.
2688	 */
2689	nvp->nvp_queue_depth = 1;
2690
2691	/*
2692	 * Port is initialized whether the device is attached or not.
2693	 * Link processing and device identification will be started later,
2694	 * after interrupts are initialized.
2695	 */
2696	nvp->nvp_type = SATA_DTYPE_NONE;
2697	nvp->nvp_signature = 0;
2698
2699	nvp->nvp_state |= NV_PORT_INIT;
2700
2701	return (NV_SUCCESS);
2702}
2703
2704
2705/*
2706 * Establish initial link & device type
2707 * Called only from nv_attach
2708 * Loops up to approximately 210ms; can exit earlier.
2709 * The time includes wait for the link up and completion of the initial
2710 * signature gathering operation.
2711 */
2712static void
2713nv_init_port_link_processing(nv_ctl_t *nvc)
2714{
2715	ddi_acc_handle_t bar5_hdl;
2716	nv_port_t *nvp;
2717	volatile uint32_t sstatus;
2718	int port, links_up, ready_ports, i;
2719
2720
2721	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2722		nvp = &(nvc->nvc_port[port]);
2723		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2724			/*
2725			 * Initiate device identification, if any is attached
2726			 * and reset was not already applied by hot-plug
2727			 * event processing.
2728			 */
2729			mutex_enter(&nvp->nvp_mutex);
2730			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2731				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2732				nv_reset(nvp);
2733			}
2734			mutex_exit(&nvp->nvp_mutex);
2735		}
2736	}
2737	/*
2738	 * Wait up to 10ms for links up.
2739	 * Spec says that link should be up in 1ms.
2740	 */
2741	for (i = 0; i < 10; i++) {
2742		drv_usecwait(NV_ONE_MSEC);
2743		links_up = 0;
2744		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2745			nvp = &(nvc->nvc_port[port]);
2746			mutex_enter(&nvp->nvp_mutex);
2747			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2748			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2749			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2750			    (SSTATUS_GET_DET(sstatus) ==
2751			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2752				if ((nvp->nvp_state & NV_PORT_RESET) &&
2753				    nvp->nvp_type == SATA_DTYPE_NONE) {
2754					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2755				}
2756				NVLOG((NVDBG_INIT, nvc, nvp,
2757				    "nv_init_port_link_processing()"
2758				    "link up; time from reset %dms",
2759				    TICK_TO_MSEC(ddi_get_lbolt() -
2760				    nvp->nvp_reset_time)));
2761				links_up++;
2762			}
2763			mutex_exit(&nvp->nvp_mutex);
2764		}
2765		if (links_up == NV_MAX_PORTS(nvc)) {
2766			break;
2767		}
2768	}
2769	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2770	    "%d links up", links_up));
2771	/*
2772	 * At this point, if any device is attached, the link is established.
2773	 * Wait till devices are ready to be accessed, no more than 200ms.
2774	 * 200ms is empirical time in which a signature should be available.
2775	 */
2776	for (i = 0; i < 200; i++) {
2777		ready_ports = 0;
2778		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2779			nvp = &(nvc->nvc_port[port]);
2780			mutex_enter(&nvp->nvp_mutex);
2781			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2782			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2783			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2784			    (SSTATUS_GET_DET(sstatus) ==
2785			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2786			    !(nvp->nvp_state & (NV_PORT_RESET |
2787			    NV_PORT_RESET_RETRY))) {
2788				/*
2789				 * Reset already processed
2790				 */
2791				NVLOG((NVDBG_RESET, nvc, nvp,
2792				    "nv_init_port_link_processing()"
2793				    "device ready; port state %x; "
2794				    "time from reset %dms", nvp->nvp_state,
2795				    TICK_TO_MSEC(ddi_get_lbolt() -
2796				    nvp->nvp_reset_time)));
2797
2798				ready_ports++;
2799			}
2800			mutex_exit(&nvp->nvp_mutex);
2801		}
2802		if (ready_ports == links_up) {
2803			break;
2804		}
2805		drv_usecwait(NV_ONE_MSEC);
2806	}
2807	NVLOG((NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2808	    "%d devices ready", ready_ports));
2809}
2810
2811/*
2812 * Free dynamically allocated structures for port.
2813 */
2814static void
2815nv_uninit_port(nv_port_t *nvp)
2816{
2817	int i;
2818
2819	/*
2820	 * It is possible to reach here before a port has been initialized or
2821	 * after it has already been uninitialized.  Just return in that case.
2822	 */
2823	if (nvp->nvp_slot == NULL) {
2824
2825		return;
2826	}
2827	/*
2828	 * Mark port unusable now.
2829	 */
2830	nvp->nvp_state &= ~NV_PORT_INIT;
2831
2832	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2833	    "nv_uninit_port uninitializing"));
2834
2835#ifdef SGPIO_SUPPORT
2836	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2837		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2838		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2839	}
2840#endif
2841
2842	nvp->nvp_type = SATA_DTYPE_NONE;
2843
2844	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2845		if (nvp->nvp_sg_paddr[i]) {
2846			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2847		}
2848
2849		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2850			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2851		}
2852
2853		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2854			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2855		}
2856	}
2857
2858	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2859	nvp->nvp_slot = NULL;
2860
2861	kmem_free(nvp->nvp_sg_dma_hdl,
2862	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2863	nvp->nvp_sg_dma_hdl = NULL;
2864
2865	kmem_free(nvp->nvp_sg_acc_hdl,
2866	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2867	nvp->nvp_sg_acc_hdl = NULL;
2868
2869	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2870	nvp->nvp_sg_addr = NULL;
2871
2872	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2873	nvp->nvp_sg_paddr = NULL;
2874}
2875
2876
2877/*
2878 * Cache register offsets and access handles to frequently accessed registers
2879 * which are common to either chipset.
2880 */
2881static void
2882nv_common_reg_init(nv_ctl_t *nvc)
2883{
2884	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2885	uchar_t *bm_addr_offset, *sreg_offset;
2886	uint8_t bar, port;
2887	nv_port_t *nvp;
2888
2889	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2890		if (port == 0) {
2891			bar = NV_BAR_0;
2892			bm_addr_offset = 0;
2893			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2894		} else {
2895			bar = NV_BAR_2;
2896			bm_addr_offset = (uchar_t *)8;
2897			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2898		}
2899
2900		nvp = &(nvc->nvc_port[port]);
2901		nvp->nvp_ctlp = nvc;
2902		nvp->nvp_port_num = port;
2903		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2904
2905		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2906		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2907		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2908		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2909		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2910		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2911		    (long)bm_addr_offset;
2912
2913		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2914		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2915		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2916		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2917	}
2918}
2919
2920
2921static void
2922nv_uninit_ctl(nv_ctl_t *nvc)
2923{
2924	int port;
2925	nv_port_t *nvp;
2926
2927	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2928
2929	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2930		nvp = &(nvc->nvc_port[port]);
2931		mutex_enter(&nvp->nvp_mutex);
2932		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2933		nv_uninit_port(nvp);
2934		mutex_exit(&nvp->nvp_mutex);
2935		mutex_destroy(&nvp->nvp_mutex);
2936		cv_destroy(&nvp->nvp_poll_cv);
2937	}
2938
2939	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2940	nvc->nvc_port = NULL;
2941}
2942
2943
2944/*
2945 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
2946 * that interrupts from other devices can be disregarded while dtracing.
2947 */
2948/* ARGSUSED */
2949static uint_t
2950ck804_intr(caddr_t arg1, caddr_t arg2)
2951{
2952	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2953	uint8_t intr_status;
2954	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2955
2956	if (nvc->nvc_state & NV_CTRL_SUSPEND)
2957		return (DDI_INTR_UNCLAIMED);
2958
2959	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
2960
2961	if (intr_status == 0) {
2962
2963		return (DDI_INTR_UNCLAIMED);
2964	}
2965
2966	ck804_intr_process(nvc, intr_status);
2967
2968	return (DDI_INTR_CLAIMED);
2969}
2970
2971
2972/*
2973 * Main interrupt handler for ck804.  handles normal device
2974 * interrupts as well as port hot plug and remove interrupts.
2975 *
2976 */
2977static void
2978ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2979{
2980
2981	int port, i;
2982	nv_port_t *nvp;
2983	nv_slot_t *nv_slotp;
2984	uchar_t	status;
2985	sata_pkt_t *spkt;
2986	uint8_t bmstatus, clear_bits;
2987	ddi_acc_handle_t bmhdl;
2988	int nvcleared = 0;
2989	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2990	uint32_t sstatus;
2991	int port_mask_hot[] = {
2992		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
2993	};
2994	int port_mask_pm[] = {
2995		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
2996	};
2997
2998	NVLOG((NVDBG_INTR, nvc, NULL,
2999	    "ck804_intr_process entered intr_status=%x", intr_status));
3000
3001	/*
3002	 * For command completion interrupt, explicit clear is not required.
3003	 * however, for the error cases explicit clear is performed.
3004	 */
3005	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3006
3007		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3008
3009		if ((port_mask[port] & intr_status) == 0) {
3010			continue;
3011		}
3012
3013		NVLOG((NVDBG_INTR, nvc, NULL,
3014		    "ck804_intr_process interrupt on port %d", port));
3015
3016		nvp = &(nvc->nvc_port[port]);
3017
3018		mutex_enter(&nvp->nvp_mutex);
3019
3020		/*
3021		 * there was a corner case found where an interrupt
3022		 * arrived before nvp_slot was set.  Should
3023		 * probably should track down why that happens and try
3024		 * to eliminate that source and then get rid of this
3025		 * check.
3026		 */
3027		if (nvp->nvp_slot == NULL) {
3028			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3029			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3030			    "received before initialization "
3031			    "completed status=%x", status));
3032			mutex_exit(&nvp->nvp_mutex);
3033
3034			/*
3035			 * clear interrupt bits
3036			 */
3037			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3038			    port_mask[port]);
3039
3040			continue;
3041		}
3042
3043		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3044			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3045			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3046			    " no command in progress status=%x", status));
3047			mutex_exit(&nvp->nvp_mutex);
3048
3049			/*
3050			 * clear interrupt bits
3051			 */
3052			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3053			    port_mask[port]);
3054
3055			continue;
3056		}
3057
3058		bmhdl = nvp->nvp_bm_hdl;
3059		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3060
3061		if (!(bmstatus & BMISX_IDEINTS)) {
3062			mutex_exit(&nvp->nvp_mutex);
3063
3064			continue;
3065		}
3066
3067		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3068
3069		if (status & SATA_STATUS_BSY) {
3070			mutex_exit(&nvp->nvp_mutex);
3071
3072			continue;
3073		}
3074
3075		nv_slotp = &(nvp->nvp_slot[0]);
3076
3077		ASSERT(nv_slotp);
3078
3079		spkt = nv_slotp->nvslot_spkt;
3080
3081		if (spkt == NULL) {
3082			mutex_exit(&nvp->nvp_mutex);
3083
3084			continue;
3085		}
3086
3087		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3088
3089		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3090
3091		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3092
3093			nv_complete_io(nvp, spkt, 0);
3094		}
3095
3096		mutex_exit(&nvp->nvp_mutex);
3097	}
3098
3099	/*
3100	 * ck804 often doesn't correctly distinguish hot add/remove
3101	 * interrupts.  Frequently both the ADD and the REMOVE bits
3102	 * are asserted, whether it was a remove or add.  Use sstatus
3103	 * to distinguish hot add from hot remove.
3104	 */
3105
3106	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3107		clear_bits = 0;
3108
3109		nvp = &(nvc->nvc_port[port]);
3110		mutex_enter(&nvp->nvp_mutex);
3111
3112		if ((port_mask_pm[port] & intr_status) != 0) {
3113			clear_bits = port_mask_pm[port];
3114			NVLOG((NVDBG_HOT, nvc, nvp,
3115			    "clearing PM interrupt bit: %x",
3116			    intr_status & port_mask_pm[port]));
3117		}
3118
3119		if ((port_mask_hot[port] & intr_status) == 0) {
3120			if (clear_bits != 0) {
3121				goto clear;
3122			} else {
3123				mutex_exit(&nvp->nvp_mutex);
3124				continue;
3125			}
3126		}
3127
3128		/*
3129		 * reaching here means there was a hot add or remove.
3130		 */
3131		clear_bits |= port_mask_hot[port];
3132
3133		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3134
3135		sstatus = nv_get32(bar5_hdl,
3136		    nvc->nvc_port[port].nvp_sstatus);
3137
3138		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3139		    SSTATUS_DET_DEVPRE_PHYCOM) {
3140			nv_report_add_remove(nvp, 0);
3141		} else {
3142			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3143		}
3144	clear:
3145		/*
3146		 * clear interrupt bits.  explicit interrupt clear is
3147		 * required for hotplug interrupts.
3148		 */
3149		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3150
3151		/*
3152		 * make sure it's flushed and cleared.  If not try
3153		 * again.  Sometimes it has been observed to not clear
3154		 * on the first try.
3155		 */
3156		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3157
3158		/*
3159		 * make 10 additional attempts to clear the interrupt
3160		 */
3161		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3162			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3163			    "still not clear try=%d", intr_status,
3164			    ++nvcleared));
3165			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3166			    clear_bits);
3167			intr_status = nv_get8(bar5_hdl,
3168			    nvc->nvc_ck804_int_status);
3169		}
3170
3171		/*
3172		 * if still not clear, log a message and disable the
3173		 * port. highly unlikely that this path is taken, but it
3174		 * gives protection against a wedged interrupt.
3175		 */
3176		if (intr_status & clear_bits) {
3177			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3178			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3179			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3180			nvp->nvp_state |= NV_PORT_FAILED;
3181			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3182			    B_TRUE);
3183			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3184			    "interrupt.  disabling port intr_status=%X",
3185			    intr_status);
3186		}
3187
3188		mutex_exit(&nvp->nvp_mutex);
3189	}
3190}
3191
3192
3193/*
3194 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3195 * on the controller, to handle completion and hot plug and remove events.
3196 *
3197 */
3198static uint_t
3199mcp5x_intr_port(nv_port_t *nvp)
3200{
3201	nv_ctl_t *nvc = nvp->nvp_ctlp;
3202	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3203	uint8_t clear = 0, intr_cycles = 0;
3204	int ret = DDI_INTR_UNCLAIMED;
3205	uint16_t int_status;
3206	clock_t intr_time;
3207	int loop_cnt = 0;
3208
3209	nvp->intr_start_time = ddi_get_lbolt();
3210
3211	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
3212
3213	do {
3214		/*
3215		 * read current interrupt status
3216		 */
3217		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3218
3219		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
3220
3221		/*
3222		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3223		 * but are masked out from causing an interrupt to be generated
3224		 * to the processor.  Ignore them here by masking them out.
3225		 */
3226		int_status &= ~(MCP5X_INT_IGNORE);
3227
3228		/*
3229		 * exit the loop when no more interrupts to process
3230		 */
3231		if (int_status == 0) {
3232
3233			break;
3234		}
3235
3236		if (int_status & MCP5X_INT_COMPLETE) {
3237			NVLOG((NVDBG_INTR, nvc, nvp,
3238			    "mcp5x_packet_complete_intr"));
3239			/*
3240			 * since int_status was set, return DDI_INTR_CLAIMED
3241			 * from the DDI's perspective even though the packet
3242			 * completion may not have succeeded.  If it fails,
3243			 * need to manually clear the interrupt, otherwise
3244			 * clearing is implicit.
3245			 */
3246			ret = DDI_INTR_CLAIMED;
3247			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3248			    NV_FAILURE) {
3249				clear |= MCP5X_INT_COMPLETE;
3250			} else {
3251				intr_cycles = 0;
3252			}
3253		}
3254
3255		if (int_status & MCP5X_INT_DMA_SETUP) {
3256			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));
3257
3258			/*
3259			 * Needs to be cleared before starting the BM, so do it
3260			 * now.  make sure this is still working.
3261			 */
3262			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3263			    MCP5X_INT_DMA_SETUP);
3264#ifdef NCQ
3265			ret = mcp5x_dma_setup_intr(nvc, nvp);
3266#endif
3267		}
3268
3269		if (int_status & MCP5X_INT_REM) {
3270			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device removed"));
3271			clear |= MCP5X_INT_REM;
3272			ret = DDI_INTR_CLAIMED;
3273
3274			mutex_enter(&nvp->nvp_mutex);
3275			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3276			mutex_exit(&nvp->nvp_mutex);
3277
3278		} else if (int_status & MCP5X_INT_ADD) {
3279			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
3280			clear |= MCP5X_INT_ADD;
3281			ret = DDI_INTR_CLAIMED;
3282
3283			mutex_enter(&nvp->nvp_mutex);
3284			nv_report_add_remove(nvp, 0);
3285			mutex_exit(&nvp->nvp_mutex);
3286		}
3287		if (clear) {
3288			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3289			clear = 0;
3290		}
3291		/* Protect against a stuck interrupt */
3292		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3293			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3294			    "processing.  Disabling port int_status=%X"
3295			    " clear=%X", int_status, clear);
3296			mutex_enter(&nvp->nvp_mutex);
3297			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3298			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3299			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3300			nvp->nvp_state |= NV_PORT_FAILED;
3301			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3302			    B_TRUE);
3303			mutex_exit(&nvp->nvp_mutex);
3304		}
3305
3306	} while (loop_cnt++ < nv_max_intr_loops);
3307
3308	if (loop_cnt > nvp->intr_loop_cnt) {
3309		NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp,
3310		    "Exiting with multiple intr loop count %d", loop_cnt));
3311		nvp->intr_loop_cnt = loop_cnt;
3312	}
3313
3314	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3315	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3316		uint8_t status, bmstatus;
3317		uint16_t int_status2;
3318
3319		if (int_status & MCP5X_INT_COMPLETE) {
3320			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3321			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3322			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3323			    nvp->nvp_mcp5x_int_status);
3324			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3325			    "mcp55_intr_port: Exiting with altstatus %x, "
3326			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3327			    " loop_cnt %d ", status, bmstatus, int_status2,
3328			    int_status, ret, loop_cnt));
3329		}
3330	}
3331
3332	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
3333
3334	/*
3335	 * To facilitate debugging, keep track of the length of time spent in
3336	 * the port interrupt routine.
3337	 */
3338	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3339	if (intr_time > nvp->intr_duration)
3340		nvp->intr_duration = intr_time;
3341
3342	return (ret);
3343}
3344
3345
3346/* ARGSUSED */
3347static uint_t
3348mcp5x_intr(caddr_t arg1, caddr_t arg2)
3349{
3350	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3351	int ret;
3352
3353	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3354		return (DDI_INTR_UNCLAIMED);
3355
3356	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3357	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3358
3359	return (ret);
3360}
3361
3362
3363#ifdef NCQ
3364/*
3365 * with software driven NCQ on mcp5x, an interrupt occurs right
3366 * before the drive is ready to do a DMA transfer.  At this point,
3367 * the PRD table needs to be programmed and the DMA engine enabled
3368 * and ready to go.
3369 *
3370 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3371 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3372 * -- clear bit 0 of master command reg
3373 * -- program PRD
3374 * -- clear the interrupt status bit for the DMA Setup FIS
3375 * -- set bit 0 of the bus master command register
3376 */
3377static int
3378mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3379{
3380	int slot;
3381	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3382	uint8_t bmicx;
3383	int port = nvp->nvp_port_num;
3384	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3385	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3386
3387	nv_cmn_err(CE_PANIC, nvc, nvp,
3388	    "this is should not be executed at all until NCQ");
3389
3390	mutex_enter(&nvp->nvp_mutex);
3391
3392	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3393
3394	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3395
3396	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3397	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
3398
3399	/*
3400	 * halt the DMA engine.  This step is necessary according to
3401	 * the mcp5x spec, probably since there may have been a "first" packet
3402	 * that already programmed the DMA engine, but may not turn out to
3403	 * be the first one processed.
3404	 */
3405	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3406
3407	if (bmicx & BMICX_SSBM) {
3408		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3409		    "another packet.  Cancelling and reprogramming"));
3410		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3411	}
3412	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3413
3414	nv_start_dma_engine(nvp, slot);
3415
3416	mutex_exit(&nvp->nvp_mutex);
3417
3418	return (DDI_INTR_CLAIMED);
3419}
3420#endif /* NCQ */
3421
3422
3423/*
3424 * packet completion interrupt.  If the packet is complete, invoke
3425 * the packet completion callback.
3426 */
3427static int
3428mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3429{
3430	uint8_t status, bmstatus;
3431	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3432	int sactive;
3433	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3434	sata_pkt_t *spkt;
3435	nv_slot_t *nv_slotp;
3436
3437	mutex_enter(&nvp->nvp_mutex);
3438
3439	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3440
3441	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3442		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
3443		mutex_exit(&nvp->nvp_mutex);
3444
3445		return (NV_FAILURE);
3446	}
3447
3448	/*
3449	 * Commands may have been processed by abort or timeout before
3450	 * interrupt processing acquired the mutex. So we may be processing
3451	 * an interrupt for packets that were already removed.
3452	 * For functionning NCQ processing all slots may be checked, but
3453	 * with NCQ disabled (current code), relying on *_run flags is OK.
3454	 */
3455	if (nvp->nvp_non_ncq_run) {
3456		/*
3457		 * If the just completed item is a non-ncq command, the busy
3458		 * bit should not be set
3459		 */
3460		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3461		if (status & SATA_STATUS_BSY) {
3462			nv_cmn_err(CE_WARN, nvc, nvp,
3463			    "unexpected SATA_STATUS_BSY set");
3464			mutex_exit(&nvp->nvp_mutex);
3465			/*
3466			 * calling function will clear interrupt.  then
3467			 * the real interrupt will either arrive or the
3468			 * packet timeout handling will take over and
3469			 * reset.
3470			 */
3471			return (NV_FAILURE);
3472		}
3473		ASSERT(nvp->nvp_ncq_run == 0);
3474	} else {
3475		ASSERT(nvp->nvp_non_ncq_run == 0);
3476		/*
3477		 * Pre-NCQ code!
3478		 * Nothing to do. The packet for the command that just
3479		 * completed is already gone. Just clear the interrupt.
3480		 */
3481		(void) nv_bm_status_clear(nvp);
3482		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3483		mutex_exit(&nvp->nvp_mutex);
3484		return (NV_SUCCESS);
3485
3486		/*
3487		 * NCQ check for BSY here and wait if still bsy before
3488		 * continuing. Rather than wait for it to be cleared
3489		 * when starting a packet and wasting CPU time, the starting
3490		 * thread can exit immediate, but might have to spin here
3491		 * for a bit possibly.  Needs more work and experimentation.
3492		 *
3493		 */
3494	}
3495
3496	/*
3497	 * active_pkt_bit will represent the bitmap of the single completed
3498	 * packet.  Because of the nature of sw assisted NCQ, only one
3499	 * command will complete per interrupt.
3500	 */
3501
3502	if (ncq_command == B_FALSE) {
3503		active_pkt = 0;
3504	} else {
3505		/*
3506		 * NCQ: determine which command just completed, by examining
3507		 * which bit cleared in the register since last written.
3508		 */
3509		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3510
3511		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3512
3513		ASSERT(active_pkt_bit);
3514
3515
3516		/*
3517		 * this failure path needs more work to handle the
3518		 * error condition and recovery.
3519		 */
3520		if (active_pkt_bit == 0) {
3521			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3522
3523			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3524			    "nvp->nvp_sactive %X", sactive,
3525			    nvp->nvp_sactive_cache);
3526
3527			(void) nv_get8(cmdhdl, nvp->nvp_status);
3528
3529			mutex_exit(&nvp->nvp_mutex);
3530
3531			return (NV_FAILURE);
3532		}
3533
3534		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3535		    active_pkt++, active_pkt_bit >>= 1) {
3536		}
3537
3538		/*
3539		 * make sure only one bit is ever turned on
3540		 */
3541		ASSERT(active_pkt_bit == 1);
3542
3543		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3544	}
3545
3546	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3547
3548	spkt = nv_slotp->nvslot_spkt;
3549
3550	ASSERT(spkt != NULL);
3551
3552	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3553
3554	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3555
3556	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3557
3558		nv_complete_io(nvp, spkt, active_pkt);
3559	}
3560
3561	mutex_exit(&nvp->nvp_mutex);
3562
3563	return (NV_SUCCESS);
3564}
3565
3566
3567static void
3568nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3569{
3570
3571	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3572
3573	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3574		nvp->nvp_ncq_run--;
3575	} else {
3576		nvp->nvp_non_ncq_run--;
3577	}
3578
3579	/*
3580	 * mark the packet slot idle so it can be reused.  Do this before
3581	 * calling satapkt_comp so the slot can be reused.
3582	 */
3583	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3584
3585	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3586		/*
3587		 * If this is not timed polled mode cmd, which has an
3588		 * active thread monitoring for completion, then need
3589		 * to signal the sleeping thread that the cmd is complete.
3590		 */
3591		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3592			cv_signal(&nvp->nvp_poll_cv);
3593		}
3594
3595		return;
3596	}
3597
3598	if (spkt->satapkt_comp != NULL) {
3599		mutex_exit(&nvp->nvp_mutex);
3600		(*spkt->satapkt_comp)(spkt);
3601		mutex_enter(&nvp->nvp_mutex);
3602	}
3603}
3604
3605
3606/*
3607 * check whether packet is ncq command or not.  for ncq command,
3608 * start it if there is still room on queue.  for non-ncq command only
3609 * start if no other command is running.
3610 */
3611static int
3612nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3613{
3614	uint8_t cmd, ncq;
3615
3616	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3617
3618	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3619
3620	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3621	    (cmd == SATAC_READ_FPDMA_QUEUED));
3622
3623	if (ncq == B_FALSE) {
3624
3625		if ((nvp->nvp_non_ncq_run == 1) ||
3626		    (nvp->nvp_ncq_run > 0)) {
3627			/*
3628			 * next command is non-ncq which can't run
3629			 * concurrently.  exit and return queue full.
3630			 */
3631			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3632
3633			return (SATA_TRAN_QUEUE_FULL);
3634		}
3635
3636		return (nv_start_common(nvp, spkt));
3637	}
3638
3639	/*
3640	 * ncq == B_TRUE
3641	 */
3642	if (nvp->nvp_non_ncq_run == 1) {
3643		/*
3644		 * cannot start any NCQ commands when there
3645		 * is a non-NCQ command running.
3646		 */
3647		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3648
3649		return (SATA_TRAN_QUEUE_FULL);
3650	}
3651
3652#ifdef NCQ
3653	/*
3654	 * this is not compiled for now as satapkt_device.satadev_qdepth
3655	 * is being pulled out until NCQ support is later addressed
3656	 *
3657	 * nvp_queue_depth is initialized by the first NCQ command
3658	 * received.
3659	 */
3660	if (nvp->nvp_queue_depth == 1) {
3661		nvp->nvp_queue_depth =
3662		    spkt->satapkt_device.satadev_qdepth;
3663
3664		ASSERT(nvp->nvp_queue_depth > 1);
3665
3666		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3667		    "nv_process_queue: nvp_queue_depth set to %d",
3668		    nvp->nvp_queue_depth));
3669	}
3670#endif
3671
3672	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3673		/*
3674		 * max number of NCQ commands already active
3675		 */
3676		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3677
3678		return (SATA_TRAN_QUEUE_FULL);
3679	}
3680
3681	return (nv_start_common(nvp, spkt));
3682}
3683
3684
3685/*
3686 * configure INTx and legacy interrupts
3687 */
3688static int
3689nv_add_legacy_intrs(nv_ctl_t *nvc)
3690{
3691	dev_info_t	*devinfo = nvc->nvc_dip;
3692	int		actual, count = 0;
3693	int		x, y, rc, inum = 0;
3694
3695	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3696
3697	/*
3698	 * get number of interrupts
3699	 */
3700	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3701	if ((rc != DDI_SUCCESS) || (count == 0)) {
3702		NVLOG((NVDBG_INTR, nvc, NULL,
3703		    "ddi_intr_get_nintrs() failed, "
3704		    "rc %d count %d", rc, count));
3705
3706		return (DDI_FAILURE);
3707	}
3708
3709	/*
3710	 * allocate an array of interrupt handles
3711	 */
3712	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3713	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3714
3715	/*
3716	 * call ddi_intr_alloc()
3717	 */
3718	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3719	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3720
3721	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3722		nv_cmn_err(CE_WARN, nvc, NULL,
3723		    "ddi_intr_alloc() failed, rc %d", rc);
3724		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3725
3726		return (DDI_FAILURE);
3727	}
3728
3729	if (actual < count) {
3730		nv_cmn_err(CE_WARN, nvc, NULL,
3731		    "ddi_intr_alloc: requested: %d, received: %d",
3732		    count, actual);
3733
3734		goto failure;
3735	}
3736
3737	nvc->nvc_intr_cnt = actual;
3738
3739	/*
3740	 * get intr priority
3741	 */
3742	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3743	    DDI_SUCCESS) {
3744		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3745
3746		goto failure;
3747	}
3748
3749	/*
3750	 * Test for high level mutex
3751	 */
3752	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3753		nv_cmn_err(CE_WARN, nvc, NULL,
3754		    "nv_add_legacy_intrs: high level intr not supported");
3755
3756		goto failure;
3757	}
3758
3759	for (x = 0; x < actual; x++) {
3760		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3761		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3762			nv_cmn_err(CE_WARN, nvc, NULL,
3763			    "ddi_intr_add_handler() failed");
3764
3765			goto failure;
3766		}
3767	}
3768
3769	/*
3770	 * call ddi_intr_enable() for legacy interrupts
3771	 */
3772	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3773		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3774	}
3775
3776	return (DDI_SUCCESS);
3777
3778	failure:
3779	/*
3780	 * free allocated intr and nvc_htable
3781	 */
3782	for (y = 0; y < actual; y++) {
3783		(void) ddi_intr_free(nvc->nvc_htable[y]);
3784	}
3785
3786	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3787
3788	return (DDI_FAILURE);
3789}
3790
3791#ifdef	NV_MSI_SUPPORTED
3792/*
3793 * configure MSI interrupts
3794 */
3795static int
3796nv_add_msi_intrs(nv_ctl_t *nvc)
3797{
3798	dev_info_t	*devinfo = nvc->nvc_dip;
3799	int		count, avail, actual;
3800	int		x, y, rc, inum = 0;
3801
3802	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3803
3804	/*
3805	 * get number of interrupts
3806	 */
3807	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3808	if ((rc != DDI_SUCCESS) || (count == 0)) {
3809		nv_cmn_err(CE_WARN, nvc, NULL,
3810		    "ddi_intr_get_nintrs() failed, "
3811		    "rc %d count %d", rc, count);
3812
3813		return (DDI_FAILURE);
3814	}
3815
3816	/*
3817	 * get number of available interrupts
3818	 */
3819	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3820	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3821		nv_cmn_err(CE_WARN, nvc, NULL,
3822		    "ddi_intr_get_navail() failed, "
3823		    "rc %d avail %d", rc, avail);
3824
3825		return (DDI_FAILURE);
3826	}
3827
3828	if (avail < count) {
3829		nv_cmn_err(CE_WARN, nvc, NULL,
3830		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3831		    avail, count);
3832	}
3833
3834	/*
3835	 * allocate an array of interrupt handles
3836	 */
3837	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3838	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3839
3840	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3841	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3842
3843	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3844		nv_cmn_err(CE_WARN, nvc, NULL,
3845		    "ddi_intr_alloc() failed, rc %d", rc);
3846		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3847
3848		return (DDI_FAILURE);
3849	}
3850
3851	/*
3852	 * Use interrupt count returned or abort?
3853	 */
3854	if (actual < count) {
3855		NVLOG((NVDBG_INIT, nvc, NULL,
3856		    "Requested: %d, Received: %d", count, actual));
3857	}
3858
3859	nvc->nvc_intr_cnt = actual;
3860
3861	/*
3862	 * get priority for first msi, assume remaining are all the same
3863	 */
3864	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3865	    DDI_SUCCESS) {
3866		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3867
3868		goto failure;
3869	}
3870
3871	/*
3872	 * test for high level mutex
3873	 */
3874	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3875		nv_cmn_err(CE_WARN, nvc, NULL,
3876		    "nv_add_msi_intrs: high level intr not supported");
3877
3878		goto failure;
3879	}
3880
3881	/*
3882	 * Call ddi_intr_add_handler()
3883	 */
3884	for (x = 0; x < actual; x++) {
3885		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3886		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3887			nv_cmn_err(CE_WARN, nvc, NULL,
3888			    "ddi_intr_add_handler() failed");
3889
3890			goto failure;
3891		}
3892	}
3893
3894	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3895
3896	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3897		(void) ddi_intr_block_enable(nvc->nvc_htable,
3898		    nvc->nvc_intr_cnt);
3899	} else {
3900		/*
3901		 * Call ddi_intr_enable() for MSI non block enable
3902		 */
3903		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3904			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3905		}
3906	}
3907
3908	return (DDI_SUCCESS);
3909
3910	failure:
3911	/*
3912	 * free allocated intr and nvc_htable
3913	 */
3914	for (y = 0; y < actual; y++) {
3915		(void) ddi_intr_free(nvc->nvc_htable[y]);
3916	}
3917
3918	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3919
3920	return (DDI_FAILURE);
3921}
3922#endif
3923
3924
3925static void
3926nv_rem_intrs(nv_ctl_t *nvc)
3927{
3928	int x, i;
3929	nv_port_t *nvp;
3930
3931	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3932
3933	/*
3934	 * prevent controller from generating interrupts by
3935	 * masking them out.  This is an extra precaution.
3936	 */
3937	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3938		nvp = (&nvc->nvc_port[i]);
3939		mutex_enter(&nvp->nvp_mutex);
3940		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3941		mutex_exit(&nvp->nvp_mutex);
3942	}
3943
3944	/*
3945	 * disable all interrupts
3946	 */
3947	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3948	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3949		(void) ddi_intr_block_disable(nvc->nvc_htable,
3950		    nvc->nvc_intr_cnt);
3951	} else {
3952		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3953			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3954		}
3955	}
3956
3957	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3958		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3959		(void) ddi_intr_free(nvc->nvc_htable[x]);
3960	}
3961
3962	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3963}
3964
3965
3966/*
3967 * variable argument wrapper for cmn_err.  prefixes the instance and port
3968 * number if possible
3969 */
3970static void
3971nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3972{
3973	char port[NV_STRING_10];
3974	char inst[NV_STRING_10];
3975
3976	mutex_enter(&nv_log_mutex);
3977
3978	if (nvc) {
3979		(void) snprintf(inst, NV_STRING_10, "inst %d",
3980		    ddi_get_instance(nvc->nvc_dip));
3981	} else {
3982		inst[0] = '\0';
3983	}
3984
3985	if (nvp) {
3986		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3987	} else {
3988		port[0] = '\0';
3989	}
3990
3991	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3992	    (inst[0]|port[0] ? ": " :""));
3993
3994	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3995	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3996
3997	/*
3998	 * normally set to log to console but in some debug situations it
3999	 * may be useful to log only to a file.
4000	 */
4001	if (nv_log_to_console) {
4002		if (nv_prom_print) {
4003			prom_printf("%s\n", nv_log_buf);
4004		} else {
4005			cmn_err(ce, "%s", nv_log_buf);
4006		}
4007
4008
4009	} else {
4010		cmn_err(ce, "!%s", nv_log_buf);
4011	}
4012
4013	mutex_exit(&nv_log_mutex);
4014}
4015
4016
4017/*
4018 * wrapper for cmn_err
4019 */
4020static void
4021nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4022{
4023	va_list ap;
4024
4025	va_start(ap, fmt);
4026	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
4027	va_end(ap);
4028}
4029
4030
4031#if defined(DEBUG)
4032/*
4033 * prefixes the instance and port number if possible to the debug message
4034 */
4035static void
4036nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4037{
4038	va_list ap;
4039
4040	if ((nv_debug_flags & flag) == 0) {
4041		return;
4042	}
4043
4044	va_start(ap, fmt);
4045	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
4046	va_end(ap);
4047
4048	/*
4049	 * useful for some debugging situations
4050	 */
4051	if (nv_log_delay) {
4052		drv_usecwait(nv_log_delay);
4053	}
4054
4055}
4056#endif /* DEBUG */
4057
4058
4059/*
4060 * program registers which are common to all commands
4061 */
4062static void
4063nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4064{
4065	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4066	sata_pkt_t *spkt;
4067	sata_cmd_t *satacmd;
4068	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4069	uint8_t cmd, ncq = B_FALSE;
4070
4071	spkt = nv_slotp->nvslot_spkt;
4072	satacmd = &spkt->satapkt_cmd;
4073	cmd = satacmd->satacmd_cmd_reg;
4074
4075	ASSERT(nvp->nvp_slot);
4076
4077	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4078	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4079		ncq = B_TRUE;
4080	}
4081
4082	/*
4083	 * select the drive
4084	 */
4085	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4086
4087	/*
4088	 * make certain the drive selected
4089	 */
4090	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4091	    NV_SEC2USEC(5), 0) == B_FALSE) {
4092
4093		return;
4094	}
4095
4096	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4097
4098	case ATA_ADDR_LBA:
4099		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
4100
4101		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4102		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4103		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4104		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4105
4106		break;
4107
4108	case ATA_ADDR_LBA28:
4109		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4110		    "ATA_ADDR_LBA28 mode"));
4111		/*
4112		 * NCQ only uses 48-bit addressing
4113		 */
4114		ASSERT(ncq != B_TRUE);
4115
4116		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4117		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4118		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4119		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4120
4121		break;
4122
4123	case ATA_ADDR_LBA48:
4124		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4125		    "ATA_ADDR_LBA48 mode"));
4126
4127		/*
4128		 * for NCQ, tag goes into count register and real sector count
4129		 * into features register.  The sata module does the translation
4130		 * in the satacmd.
4131		 */
4132		if (ncq == B_TRUE) {
4133			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4134			nv_put8(cmdhdl, nvp->nvp_feature,
4135			    satacmd->satacmd_features_reg_ext);
4136			nv_put8(cmdhdl, nvp->nvp_feature,
4137			    satacmd->satacmd_features_reg);
4138		} else {
4139			nv_put8(cmdhdl, nvp->nvp_count,
4140			    satacmd->satacmd_sec_count_msb);
4141			nv_put8(cmdhdl, nvp->nvp_count,
4142			    satacmd->satacmd_sec_count_lsb);
4143		}
4144
4145		/*
4146		 * send the high-order half first
4147		 */
4148		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4149		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4150		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4151		/*
4152		 * Send the low-order half
4153		 */
4154		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4155		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4156		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4157
4158		break;
4159
4160	case 0:
4161		/*
4162		 * non-media access commands such as identify and features
4163		 * take this path.
4164		 */
4165		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4166		nv_put8(cmdhdl, nvp->nvp_feature,
4167		    satacmd->satacmd_features_reg);
4168		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4169		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4170		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4171
4172		break;
4173
4174	default:
4175		break;
4176	}
4177
4178	ASSERT(nvp->nvp_slot);
4179}
4180
4181
4182/*
4183 * start a command that involves no media access
4184 */
4185static int
4186nv_start_nodata(nv_port_t *nvp, int slot)
4187{
4188	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4189	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4190	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4191	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4192
4193	nv_program_taskfile_regs(nvp, slot);
4194
4195	/*
4196	 * This next one sets the controller in motion
4197	 */
4198	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4199
4200	return (SATA_TRAN_ACCEPTED);
4201}
4202
4203
4204static int
4205nv_bm_status_clear(nv_port_t *nvp)
4206{
4207	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4208	uchar_t	status, ret;
4209
4210	/*
4211	 * Get the current BM status
4212	 */
4213	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4214
4215	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4216
4217	/*
4218	 * Clear the latches (and preserve the other bits)
4219	 */
4220	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4221
4222	return (ret);
4223}
4224
4225
4226/*
4227 * program the bus master DMA engine with the PRD address for
4228 * the active slot command, and start the DMA engine.
4229 */
4230static void
4231nv_start_dma_engine(nv_port_t *nvp, int slot)
4232{
4233	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4234	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4235	uchar_t direction;
4236
4237	ASSERT(nv_slotp->nvslot_spkt != NULL);
4238
4239	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4240	    == SATA_DIR_READ) {
4241		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4242	} else {
4243		direction = BMICX_RWCON_READ_FROM_MEMORY;
4244	}
4245
4246	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4247	    "nv_start_dma_engine entered"));
4248
4249#if NOT_USED
4250	/*
4251	 * NOT NEEDED. Left here of historical reason.
4252	 * Reset the controller's interrupt and error status bits.
4253	 */
4254	(void) nv_bm_status_clear(nvp);
4255#endif
4256	/*
4257	 * program the PRD table physical start address
4258	 */
4259	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4260
4261	/*
4262	 * set the direction control and start the DMA controller
4263	 */
4264	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4265}
4266
4267/*
4268 * start dma command, either in or out
4269 */
4270static int
4271nv_start_dma(nv_port_t *nvp, int slot)
4272{
4273	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4274	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4275	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4276	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4277	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4278#ifdef NCQ
4279	uint8_t ncq = B_FALSE;
4280#endif
4281	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4282	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4283	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4284	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4285
4286	ASSERT(sg_count != 0);
4287
4288	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4289		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4290		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4291		    sata_cmdp->satacmd_num_dma_cookies);
4292
4293		return (NV_FAILURE);
4294	}
4295
4296	nv_program_taskfile_regs(nvp, slot);
4297
4298	/*
4299	 * start the drive in motion
4300	 */
4301	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4302
4303	/*
4304	 * the drive starts processing the transaction when the cmd register
4305	 * is written.  This is done here before programming the DMA engine to
4306	 * parallelize and save some time.  In the event that the drive is ready
4307	 * before DMA, it will wait.
4308	 */
4309#ifdef NCQ
4310	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4311	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4312		ncq = B_TRUE;
4313	}
4314#endif
4315
4316	/*
4317	 * copy the PRD list to PRD table in DMA accessible memory
4318	 * so that the controller can access it.
4319	 */
4320	for (idx = 0; idx < sg_count; idx++, srcp++) {
4321		uint32_t size;
4322
4323		nv_put32(sghdl, dstp++, srcp->dmac_address);
4324
4325		/* Set the number of bytes to transfer, 0 implies 64KB */
4326		size = srcp->dmac_size;
4327		if (size == 0x10000)
4328			size = 0;
4329
4330		/*
4331		 * If this is a 40-bit address, copy bits 32-40 of the
4332		 * physical address to bits 16-24 of the PRD count.
4333		 */
4334		if (srcp->dmac_laddress > UINT32_MAX) {
4335			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4336		}
4337
4338		/*
4339		 * set the end of table flag for the last entry
4340		 */
4341		if (idx == (sg_count - 1)) {
4342			size |= PRDE_EOT;
4343		}
4344
4345		nv_put32(sghdl, dstp++, size);
4346	}
4347
4348	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4349	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4350
4351	nv_start_dma_engine(nvp, slot);
4352
4353#ifdef NCQ
4354	/*
4355	 * optimization:  for SWNCQ, start DMA engine if this is the only
4356	 * command running.  Preliminary NCQ efforts indicated this needs
4357	 * more debugging.
4358	 *
4359	 * if (nvp->nvp_ncq_run <= 1)
4360	 */
4361
4362	if (ncq == B_FALSE) {
4363		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4364		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4365		    " cmd = %X", non_ncq_commands++, cmd));
4366		nv_start_dma_engine(nvp, slot);
4367	} else {
4368		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
4369		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
4370	}
4371#endif /* NCQ */
4372
4373	return (SATA_TRAN_ACCEPTED);
4374}
4375
4376
4377/*
4378 * start a PIO data-in ATA command
4379 */
4380static int
4381nv_start_pio_in(nv_port_t *nvp, int slot)
4382{
4383
4384	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4385	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4386	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4387
4388	nv_program_taskfile_regs(nvp, slot);
4389
4390	/*
4391	 * This next one sets the drive in motion
4392	 */
4393	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4394
4395	return (SATA_TRAN_ACCEPTED);
4396}
4397
4398
4399/*
4400 * start a PIO data-out ATA command
4401 */
4402static int
4403nv_start_pio_out(nv_port_t *nvp, int slot)
4404{
4405	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4406	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4407	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4408
4409	nv_program_taskfile_regs(nvp, slot);
4410
4411	/*
4412	 * this next one sets the drive in motion
4413	 */
4414	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4415
4416	/*
4417	 * wait for the busy bit to settle
4418	 */
4419	NV_DELAY_NSEC(400);
4420
4421	/*
4422	 * wait for the drive to assert DRQ to send the first chunk
4423	 * of data. Have to busy wait because there's no interrupt for
4424	 * the first chunk. This is bad... uses a lot of cycles if the
4425	 * drive responds too slowly or if the wait loop granularity
4426	 * is too large. It's even worse if the drive is defective and
4427	 * the loop times out.
4428	 */
4429	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4430	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4431	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4432	    4000000, 0) == B_FALSE) {
4433		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4434
4435		goto error;
4436	}
4437
4438	/*
4439	 * send the first block.
4440	 */
4441	nv_intr_pio_out(nvp, nv_slotp);
4442
4443	/*
4444	 * If nvslot_flags is not set to COMPLETE yet, then processing
4445	 * is OK so far, so return.  Otherwise, fall into error handling
4446	 * below.
4447	 */
4448	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4449
4450		return (SATA_TRAN_ACCEPTED);
4451	}
4452
4453	error:
4454	/*
4455	 * there was an error so reset the device and complete the packet.
4456	 */
4457	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4458	nv_complete_io(nvp, spkt, 0);
4459	nvp->nvp_state |= NV_PORT_RESET;
4460	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4461	nv_reset(nvp);
4462
4463	return (SATA_TRAN_PORT_ERROR);
4464}
4465
4466
4467/*
4468 * start a ATAPI Packet command (PIO data in or out)
4469 */
4470static int
4471nv_start_pkt_pio(nv_port_t *nvp, int slot)
4472{
4473	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4474	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4475	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4476	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4477
4478	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4479	    "nv_start_pkt_pio: start"));
4480
4481	/*
4482	 * Write the PACKET command to the command register.  Normally
4483	 * this would be done through nv_program_taskfile_regs().  It
4484	 * is done here because some values need to be overridden.
4485	 */
4486
4487	/* select the drive */
4488	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4489
4490	/* make certain the drive selected */
4491	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4492	    NV_SEC2USEC(5), 0) == B_FALSE) {
4493		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4494		    "nv_start_pkt_pio: drive select failed"));
4495		return (SATA_TRAN_PORT_ERROR);
4496	}
4497
4498	/*
4499	 * The command is always sent via PIO, despite whatever the SATA
4500	 * framework sets in the command.  Overwrite the DMA bit to do this.
4501	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4502	 */
4503	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4504
4505	/* set appropriately by the sata framework */
4506	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4507	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4508	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4509	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4510
4511	/* initiate the command by writing the command register last */
4512	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4513
4514	/* Give the host controller time to do its thing */
4515	NV_DELAY_NSEC(400);
4516
4517	/*
4518	 * Wait for the device to indicate that it is ready for the command
4519	 * ATAPI protocol state - HP0: Check_Status_A
4520	 */
4521
4522	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4523	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4524	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4525	    4000000, 0) == B_FALSE) {
4526		/*
4527		 * Either an error or device fault occurred or the wait
4528		 * timed out.  According to the ATAPI protocol, command
4529		 * completion is also possible.  Other implementations of
4530		 * this protocol don't handle this last case, so neither
4531		 * does this code.
4532		 */
4533
4534		if (nv_get8(cmdhdl, nvp->nvp_status) &
4535		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4536			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4537
4538			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4539			    "nv_start_pkt_pio: device error (HP0)"));
4540		} else {
4541			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4542
4543			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4544			    "nv_start_pkt_pio: timeout (HP0)"));
4545		}
4546
4547		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4548		nv_complete_io(nvp, spkt, 0);
4549		nvp->nvp_state |= NV_PORT_RESET;
4550		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4551		nv_reset(nvp);
4552
4553		return (SATA_TRAN_PORT_ERROR);
4554	}
4555
4556	/*
4557	 * Put the ATAPI command in the data register
4558	 * ATAPI protocol state - HP1: Send_Packet
4559	 */
4560
4561	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4562	    (ushort_t *)nvp->nvp_data,
4563	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4564
4565	/*
4566	 * See you in nv_intr_pkt_pio.
4567	 * ATAPI protocol state - HP3: INTRQ_wait
4568	 */
4569
4570	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4571	    "nv_start_pkt_pio: exiting into HP3"));
4572
4573	return (SATA_TRAN_ACCEPTED);
4574}
4575
4576
4577/*
4578 * Interrupt processing for a non-data ATA command.
4579 */
4580static void
4581nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4582{
4583	uchar_t status;
4584	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4585	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4586	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4587	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4588
4589	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4590
4591	status = nv_get8(cmdhdl, nvp->nvp_status);
4592
4593	/*
4594	 * check for errors
4595	 */
4596	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4597		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4598		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4599		    nvp->nvp_altstatus);
4600		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4601	} else {
4602		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4603	}
4604
4605	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4606}
4607
4608
4609/*
4610 * ATA command, PIO data in
4611 */
4612static void
4613nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4614{
4615	uchar_t	status;
4616	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4617	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4618	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4619	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4620	int count;
4621
4622	status = nv_get8(cmdhdl, nvp->nvp_status);
4623
4624	if (status & SATA_STATUS_BSY) {
4625		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4626		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4627		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4628		    nvp->nvp_altstatus);
4629		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4630		nvp->nvp_state |= NV_PORT_RESET;
4631		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4632		nv_reset(nvp);
4633
4634		return;
4635	}
4636
4637	/*
4638	 * check for errors
4639	 */
4640	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4641	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4642		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4643		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4644		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4645
4646		return;
4647	}
4648
4649	/*
4650	 * read the next chunk of data (if any)
4651	 */
4652	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4653
4654	/*
4655	 * read count bytes
4656	 */
4657	ASSERT(count != 0);
4658
4659	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4660	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4661
4662	nv_slotp->nvslot_v_addr += count;
4663	nv_slotp->nvslot_byte_count -= count;
4664
4665
4666	if (nv_slotp->nvslot_byte_count != 0) {
4667		/*
4668		 * more to transfer.  Wait for next interrupt.
4669		 */
4670		return;
4671	}
4672
4673	/*
4674	 * transfer is complete. wait for the busy bit to settle.
4675	 */
4676	NV_DELAY_NSEC(400);
4677
4678	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4679	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4680}
4681
4682
4683/*
4684 * ATA command PIO data out
4685 */
4686static void
4687nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4688{
4689	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4690	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4691	uchar_t status;
4692	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4693	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4694	int count;
4695
4696	/*
4697	 * clear the IRQ
4698	 */
4699	status = nv_get8(cmdhdl, nvp->nvp_status);
4700
4701	if (status & SATA_STATUS_BSY) {
4702		/*
4703		 * this should not happen
4704		 */
4705		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4706		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4707		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4708		    nvp->nvp_altstatus);
4709		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4710
4711		return;
4712	}
4713
4714	/*
4715	 * check for errors
4716	 */
4717	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4718		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4719		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4720		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4721
4722		return;
4723	}
4724
4725	/*
4726	 * this is the condition which signals the drive is
4727	 * no longer ready to transfer.  Likely that the transfer
4728	 * completed successfully, but check that byte_count is
4729	 * zero.
4730	 */
4731	if ((status & SATA_STATUS_DRQ) == 0) {
4732
4733		if (nv_slotp->nvslot_byte_count == 0) {
4734			/*
4735			 * complete; successful transfer
4736			 */
4737			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4738		} else {
4739			/*
4740			 * error condition, incomplete transfer
4741			 */
4742			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4743			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4744		}
4745		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4746
4747		return;
4748	}
4749
4750	/*
4751	 * write the next chunk of data
4752	 */
4753	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4754
4755	/*
4756	 * read or write count bytes
4757	 */
4758
4759	ASSERT(count != 0);
4760
4761	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4762	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4763
4764	nv_slotp->nvslot_v_addr += count;
4765	nv_slotp->nvslot_byte_count -= count;
4766}
4767
4768
4769/*
4770 * ATAPI PACKET command, PIO in/out interrupt
4771 *
4772 * Under normal circumstances, one of four different interrupt scenarios
4773 * will result in this function being called:
4774 *
4775 * 1. Packet command data transfer
4776 * 2. Packet command completion
4777 * 3. Request sense data transfer
4778 * 4. Request sense command completion
4779 */
4780static void
4781nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4782{
4783	uchar_t	status;
4784	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4785	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4786	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4787	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4788	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4789	uint16_t ctlr_count;
4790	int count;
4791
4792	/* ATAPI protocol state - HP2: Check_Status_B */
4793
4794	status = nv_get8(cmdhdl, nvp->nvp_status);
4795	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4796	    "nv_intr_pkt_pio: status 0x%x", status));
4797
4798	if (status & SATA_STATUS_BSY) {
4799		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4800			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4801			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4802		} else {
4803			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4804			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4805			nvp->nvp_state |= NV_PORT_RESET;
4806			nvp->nvp_state &= ~(NV_PORT_RESTORE |
4807			    NV_PORT_RESET_RETRY);
4808			nv_reset(nvp);
4809		}
4810
4811		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4812		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4813
4814		return;
4815	}
4816
4817	if ((status & SATA_STATUS_DF) != 0) {
4818		/*
4819		 * On device fault, just clean up and bail.  Request sense
4820		 * will just default to its NO SENSE initialized value.
4821		 */
4822
4823		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4824			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4825		}
4826
4827		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4828		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4829
4830		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4831		    nvp->nvp_altstatus);
4832		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4833		    nvp->nvp_error);
4834
4835		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4836		    "nv_intr_pkt_pio: device fault"));
4837
4838		return;
4839	}
4840
4841	if ((status & SATA_STATUS_ERR) != 0) {
4842		/*
4843		 * On command error, figure out whether we are processing a
4844		 * request sense.  If so, clean up and bail.  Otherwise,
4845		 * do a REQUEST SENSE.
4846		 */
4847
4848		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4849			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4850			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4851			    NV_FAILURE) {
4852				nv_copy_registers(nvp, &spkt->satapkt_device,
4853				    spkt);
4854				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4855				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4856			}
4857
4858			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4859			    nvp->nvp_altstatus);
4860			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4861			    nvp->nvp_error);
4862		} else {
4863			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4864			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4865
4866			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4867		}
4868
4869		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4870		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4871
4872		return;
4873	}
4874
4875	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4876		/*
4877		 * REQUEST SENSE command processing
4878		 */
4879
4880		if ((status & (SATA_STATUS_DRQ)) != 0) {
4881			/* ATAPI state - HP4: Transfer_Data */
4882
4883			/* read the byte count from the controller */
4884			ctlr_count =
4885			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4886			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4887
4888			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4889			    "nv_intr_pkt_pio: ctlr byte count - %d",
4890			    ctlr_count));
4891
4892			if (ctlr_count == 0) {
4893				/* no data to transfer - some devices do this */
4894
4895				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4896				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4897
4898				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4899				    "nv_intr_pkt_pio: done (no data)"));
4900
4901				return;
4902			}
4903
4904			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4905
4906			/* transfer the data */
4907			ddi_rep_get16(cmdhdl,
4908			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4909			    (ushort_t *)nvp->nvp_data, (count >> 1),
4910			    DDI_DEV_NO_AUTOINCR);
4911
4912			/* consume residual bytes */
4913			ctlr_count -= count;
4914
4915			if (ctlr_count > 0) {
4916				for (; ctlr_count > 0; ctlr_count -= 2)
4917					(void) ddi_get16(cmdhdl,
4918					    (ushort_t *)nvp->nvp_data);
4919			}
4920
4921			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4922			    "nv_intr_pkt_pio: transition to HP2"));
4923		} else {
4924			/* still in ATAPI state - HP2 */
4925
4926			/*
4927			 * In order to avoid clobbering the rqsense data
4928			 * set by the SATA framework, the sense data read
4929			 * from the device is put in a separate buffer and
4930			 * copied into the packet after the request sense
4931			 * command successfully completes.
4932			 */
4933			bcopy(nv_slotp->nvslot_rqsense_buff,
4934			    spkt->satapkt_cmd.satacmd_rqsense,
4935			    SATA_ATAPI_RQSENSE_LEN);
4936
4937			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4938			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4939
4940			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4941			    "nv_intr_pkt_pio: request sense done"));
4942		}
4943
4944		return;
4945	}
4946
4947	/*
4948	 * Normal command processing
4949	 */
4950
4951	if ((status & (SATA_STATUS_DRQ)) != 0) {
4952		/* ATAPI protocol state - HP4: Transfer_Data */
4953
4954		/* read the byte count from the controller */
4955		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4956		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4957
4958		if (ctlr_count == 0) {
4959			/* no data to transfer - some devices do this */
4960
4961			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4962			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4963
4964			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4965			    "nv_intr_pkt_pio: done (no data)"));
4966
4967			return;
4968		}
4969
4970		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4971
4972		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4973		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4974
4975		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4976		    "nv_intr_pkt_pio: byte_count 0x%x",
4977		    nv_slotp->nvslot_byte_count));
4978
4979		/* transfer the data */
4980
4981		if (direction == SATA_DIR_READ) {
4982			ddi_rep_get16(cmdhdl,
4983			    (ushort_t *)nv_slotp->nvslot_v_addr,
4984			    (ushort_t *)nvp->nvp_data, (count >> 1),
4985			    DDI_DEV_NO_AUTOINCR);
4986
4987			ctlr_count -= count;
4988
4989			if (ctlr_count > 0) {
4990				/* consume remainding bytes */
4991
4992				for (; ctlr_count > 0;
4993				    ctlr_count -= 2)
4994					(void) ddi_get16(cmdhdl,
4995					    (ushort_t *)nvp->nvp_data);
4996
4997				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4998				    "nv_intr_pkt_pio: bytes remained"));
4999			}
5000		} else {
5001			ddi_rep_put16(cmdhdl,
5002			    (ushort_t *)nv_slotp->nvslot_v_addr,
5003			    (ushort_t *)nvp->nvp_data, (count >> 1),
5004			    DDI_DEV_NO_AUTOINCR);
5005		}
5006
5007		nv_slotp->nvslot_v_addr += count;
5008		nv_slotp->nvslot_byte_count -= count;
5009
5010		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5011		    "nv_intr_pkt_pio: transition to HP2"));
5012	} else {
5013		/* still in ATAPI state - HP2 */
5014
5015		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5016		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5017
5018		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5019		    "nv_intr_pkt_pio: done"));
5020	}
5021}
5022
5023
5024/*
5025 * ATA command, DMA data in/out
5026 */
5027static void
5028nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5029{
5030	uchar_t status;
5031	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5032	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5033	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5034	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5035	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5036	uchar_t	bmicx;
5037	uchar_t bm_status;
5038
5039	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5040
5041	/*
5042	 * stop DMA engine.
5043	 */
5044	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5045	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5046
5047	/*
5048	 * get the status and clear the IRQ, and check for DMA error
5049	 */
5050	status = nv_get8(cmdhdl, nvp->nvp_status);
5051
5052	/*
5053	 * check for drive errors
5054	 */
5055	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5056		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5057		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5058		(void) nv_bm_status_clear(nvp);
5059
5060		return;
5061	}
5062
5063	bm_status = nv_bm_status_clear(nvp);
5064
5065	/*
5066	 * check for bus master errors
5067	 */
5068	if (bm_status & BMISX_IDERR) {
5069		spkt->satapkt_reason = SATA_PKT_RESET;   /* ? */
5070		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5071		    nvp->nvp_altstatus);
5072		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5073		nvp->nvp_state |= NV_PORT_RESET;
5074		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5075		nv_reset(nvp);
5076
5077		return;
5078	}
5079
5080	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5081}
5082
5083
5084/*
5085 * Wait for a register of a controller to achieve a specific state.
5086 * To return normally, all the bits in the first sub-mask must be ON,
5087 * all the bits in the second sub-mask must be OFF.
5088 * If timeout_usec microseconds pass without the controller achieving
5089 * the desired bit configuration, return TRUE, else FALSE.
5090 *
5091 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5092 * occur for the first 250 us, then switch over to a sleeping wait.
5093 *
5094 */
5095int
5096nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5097    int type_wait)
5098{
5099	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5100	hrtime_t end, cur, start_sleep, start;
5101	int first_time = B_TRUE;
5102	ushort_t val;
5103
5104	for (;;) {
5105		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5106
5107		if ((val & onbits) == onbits && (val & offbits) == 0) {
5108
5109			return (B_TRUE);
5110		}
5111
5112		cur = gethrtime();
5113
5114		/*
5115		 * store the start time and calculate the end
5116		 * time.  also calculate "start_sleep" which is
5117		 * the point after which the driver will stop busy
5118		 * waiting and change to sleep waiting.
5119		 */
5120		if (first_time) {
5121			first_time = B_FALSE;
5122			/*
5123			 * start and end are in nanoseconds
5124			 */
5125			start = cur;
5126			end = start + timeout_usec * 1000;
5127			/*
5128			 * add 1 ms to start
5129			 */
5130			start_sleep =  start + 250000;
5131
5132			if (servicing_interrupt()) {
5133				type_wait = NV_NOSLEEP;
5134			}
5135		}
5136
5137		if (cur > end) {
5138
5139			break;
5140		}
5141
5142		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5143#if ! defined(__lock_lint)
5144			delay(1);
5145#endif
5146		} else {
5147			drv_usecwait(nv_usec_delay);
5148		}
5149	}
5150
5151	return (B_FALSE);
5152}
5153
5154
5155/*
5156 * This is a slightly more complicated version that checks
5157 * for error conditions and bails-out rather than looping
5158 * until the timeout is exceeded.
5159 *
5160 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5161 * occur for the first 250 us, then switch over to a sleeping wait.
5162 */
5163int
5164nv_wait3(
5165	nv_port_t	*nvp,
5166	uchar_t		onbits1,
5167	uchar_t		offbits1,
5168	uchar_t		failure_onbits2,
5169	uchar_t		failure_offbits2,
5170	uchar_t		failure_onbits3,
5171	uchar_t		failure_offbits3,
5172	uint_t		timeout_usec,
5173	int		type_wait)
5174{
5175	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5176	hrtime_t end, cur, start_sleep, start;
5177	int first_time = B_TRUE;
5178	ushort_t val;
5179
5180	for (;;) {
5181		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5182
5183		/*
5184		 * check for expected condition
5185		 */
5186		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5187
5188			return (B_TRUE);
5189		}
5190
5191		/*
5192		 * check for error conditions
5193		 */
5194		if ((val & failure_onbits2) == failure_onbits2 &&
5195		    (val & failure_offbits2) == 0) {
5196
5197			return (B_FALSE);
5198		}
5199
5200		if ((val & failure_onbits3) == failure_onbits3 &&
5201		    (val & failure_offbits3) == 0) {
5202
5203			return (B_FALSE);
5204		}
5205
5206		/*
5207		 * store the start time and calculate the end
5208		 * time.  also calculate "start_sleep" which is
5209		 * the point after which the driver will stop busy
5210		 * waiting and change to sleep waiting.
5211		 */
5212		if (first_time) {
5213			first_time = B_FALSE;
5214			/*
5215			 * start and end are in nanoseconds
5216			 */
5217			cur = start = gethrtime();
5218			end = start + timeout_usec * 1000;
5219			/*
5220			 * add 1 ms to start
5221			 */
5222			start_sleep =  start + 250000;
5223
5224			if (servicing_interrupt()) {
5225				type_wait = NV_NOSLEEP;
5226			}
5227		} else {
5228			cur = gethrtime();
5229		}
5230
5231		if (cur > end) {
5232
5233			break;
5234		}
5235
5236		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5237#if ! defined(__lock_lint)
5238			delay(1);
5239#endif
5240		} else {
5241			drv_usecwait(nv_usec_delay);
5242		}
5243	}
5244
5245	return (B_FALSE);
5246}
5247
5248
5249/*
5250 * nv_port_state_change() reports the state of the port to the
5251 * sata module by calling sata_hba_event_notify().  This
5252 * function is called any time the state of the port is changed
5253 */
5254static void
5255nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5256{
5257	sata_device_t sd;
5258
5259	NVLOG((NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5260	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5261	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt()));
5262
5263	bzero((void *)&sd, sizeof (sata_device_t));
5264	sd.satadev_rev = SATA_DEVICE_REV;
5265	nv_copy_registers(nvp, &sd, NULL);
5266
5267	/*
5268	 * When NCQ is implemented sactive and snotific field need to be
5269	 * updated.
5270	 */
5271	sd.satadev_addr.cport = nvp->nvp_port_num;
5272	sd.satadev_addr.qual = addr_type;
5273	sd.satadev_state = state;
5274
5275	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5276}
5277
5278
5279
5280/*
5281 * Monitor reset progress and signature gathering.
5282 * This function may loop, so it should not be called from interrupt
5283 * context.
5284 *
5285 * Entered with nvp mutex held.
5286 */
5287static void
5288nv_monitor_reset(nv_port_t *nvp)
5289{
5290	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5291	uint32_t sstatus;
5292	int send_notification = B_FALSE;
5293	uint8_t dev_type;
5294
5295	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5296
5297	/*
5298	 * We do not know here the reason for port reset.
5299	 * Check the link status. The link needs to be active before
5300	 * we can check the link's status.
5301	 */
5302	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5303	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5304		/*
5305		 * Either link is not active or there is no device
5306		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
5307		 * (milliseconds), abort signature acquisition and complete
5308		 * reset processing.
5309		 * The link will go down when COMRESET is sent by nv_reset(),
5310		 * so it is practically nvp_reset_time milliseconds.
5311		 */
5312
5313		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5314		    NV_LINK_DOWN_TIMEOUT) {
5315			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5316			    "nv_monitor_reset: no link - ending signature "
5317			    "acquisition; time after reset %ldms",
5318			    TICK_TO_MSEC(ddi_get_lbolt() -
5319			    nvp->nvp_reset_time)));
5320		}
5321		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5322		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
5323		/*
5324		 * Else, if the link was lost (i.e. was present before)
5325		 * the controller should generate a 'remove' interrupt
5326		 * that will cause the appropriate event notification.
5327		 */
5328		return;
5329	}
5330
5331	NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5332	    "nv_monitor_reset: link up after reset; time %ldms",
5333	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time)));
5334
5335sig_read:
5336	if (nvp->nvp_signature != 0) {
5337		/*
5338		 * The link is up. The signature was acquired before (device
5339		 * was present).
5340		 * But we may need to wait for the signature (D2H FIS) before
5341		 * accessing the drive.
5342		 */
5343		if (nv_wait_for_signature != 0) {
5344			uint32_t old_signature;
5345			uint8_t old_type;
5346
5347			old_signature = nvp->nvp_signature;
5348			old_type = nvp->nvp_type;
5349			nvp->nvp_signature = 0;
5350			nv_read_signature(nvp);
5351			if (nvp->nvp_signature == 0) {
5352				nvp->nvp_signature = old_signature;
5353				nvp->nvp_type = old_type;
5354
5355#ifdef NV_DEBUG
5356				/* FOR DEBUGGING */
5357				if (nv_wait_here_forever) {
5358					drv_usecwait(1000);
5359					goto sig_read;
5360				}
5361#endif
5362				/*
5363				 * Wait, but not endlessly.
5364				 */
5365				if (TICK_TO_MSEC(ddi_get_lbolt() -
5366				    nvp->nvp_reset_time) <
5367				    nv_sig_acquisition_time) {
5368					drv_usecwait(1000);
5369					goto sig_read;
5370				} else if (!(nvp->nvp_state &
5371				    NV_PORT_RESET_RETRY)) {
5372					/*
5373					 * Retry reset.
5374					 */
5375					NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5376					    "nv_monitor_reset: retrying reset "
5377					    "time after first reset: %ldms",
5378					    TICK_TO_MSEC(ddi_get_lbolt() -
5379					    nvp->nvp_reset_time)));
5380					nvp->nvp_state |= NV_PORT_RESET_RETRY;
5381					nv_reset(nvp);
5382					goto sig_read;
5383				}
5384
5385				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5386				    "nv_monitor_reset: terminating signature "
5387				    "acquisition (1); time after reset: %ldms",
5388				    TICK_TO_MSEC(ddi_get_lbolt() -
5389				    nvp->nvp_reset_time)));
5390			} else {
5391				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5392				    "nv_monitor_reset: signature acquired; "
5393				    "time after reset: %ldms",
5394				    TICK_TO_MSEC(ddi_get_lbolt() -
5395				    nvp->nvp_reset_time)));
5396			}
5397		}
5398		/*
5399		 * Clear reset state, set device reset recovery state
5400		 */
5401		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5402		    NV_PORT_PROBE);
5403		nvp->nvp_state |= NV_PORT_RESTORE;
5404
5405		/*
5406		 * Need to send reset event notification
5407		 */
5408		send_notification = B_TRUE;
5409	} else {
5410		/*
5411		 * The link is up. The signature was not acquired before.
5412		 * We can try to fetch a device signature.
5413		 */
5414		dev_type = nvp->nvp_type;
5415
5416acquire_signature:
5417		nv_read_signature(nvp);
5418		if (nvp->nvp_signature != 0) {
5419			/*
5420			 * Got device signature.
5421			 */
5422			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5423			    "nv_monitor_reset: signature acquired; "
5424			    "time after reset: %ldms",
5425			    TICK_TO_MSEC(ddi_get_lbolt() -
5426			    nvp->nvp_reset_time)));
5427
5428			/* Clear internal reset state */
5429			nvp->nvp_state &=
5430			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);
5431
5432			if (dev_type != SATA_DTYPE_NONE) {
5433				/*
5434				 * We acquired the signature for a
5435				 * pre-existing device that was not identified
5436				 * before and and was reset.
5437				 * Need to enter the device reset recovery
5438				 * state and to send the reset notification.
5439				 */
5440				nvp->nvp_state |= NV_PORT_RESTORE;
5441				send_notification = B_TRUE;
5442			} else {
5443				/*
5444				 * Else, We acquired the signature because a new
5445				 * device was attached (the driver attach or
5446				 * a hot-plugged device). There is no need to
5447				 * enter the device reset recovery state or to
5448				 * send the reset notification, but we may need
5449				 * to send a device attached notification.
5450				 */
5451				if (nvp->nvp_state & NV_PORT_PROBE) {
5452					nv_port_state_change(nvp,
5453					    SATA_EVNT_DEVICE_ATTACHED,
5454					    SATA_ADDR_CPORT, 0);
5455					nvp->nvp_state &= ~NV_PORT_PROBE;
5456				}
5457			}
5458		} else {
5459			if (TICK_TO_MSEC(ddi_get_lbolt() -
5460			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
5461				drv_usecwait(1000);
5462				goto acquire_signature;
5463			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
5464				/*
5465				 * Some drives may require additional
5466				 * reset(s) to get a valid signature
5467				 * (indicating that the drive is ready).
5468				 * If a drive was not just powered
5469				 * up, the signature should be available
5470				 * within few hundred milliseconds
5471				 * after reset.  Therefore, if more than
5472				 * NV_SIG_ACQUISITION_TIME has elapsed
5473				 * while waiting for a signature, reset
5474				 * device again.
5475				 */
5476				NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5477				    "nv_monitor_reset: retrying reset "
5478				    "time after first reset: %ldms",
5479				    TICK_TO_MSEC(ddi_get_lbolt() -
5480				    nvp->nvp_reset_time)));
5481				nvp->nvp_state |= NV_PORT_RESET_RETRY;
5482				nv_reset(nvp);
5483				drv_usecwait(1000);
5484				goto acquire_signature;
5485			}
5486			/*
5487			 * Terminating signature acquisition.
5488			 * Hopefully, the drive is ready.
5489			 * The SATA module can deal with this as long as it
5490			 * knows that some device is attached and a device
5491			 * responds to commands.
5492			 */
5493			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
5494				send_notification = B_TRUE;
5495			}
5496			nvp->nvp_state &= ~(NV_PORT_RESET |
5497			    NV_PORT_RESET_RETRY);
5498			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5499			if (nvp->nvp_state & NV_PORT_PROBE) {
5500				nv_port_state_change(nvp,
5501				    SATA_EVNT_DEVICE_ATTACHED,
5502				    SATA_ADDR_CPORT, 0);
5503				nvp->nvp_state &= ~NV_PORT_PROBE;
5504			}
5505			nvp->nvp_type = dev_type;
5506			NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5507			    "nv_monitor_reset: terminating signature "
5508			    "acquisition (2); time after reset: %ldms",
5509			    TICK_TO_MSEC(ddi_get_lbolt() -
5510			    nvp->nvp_reset_time)));
5511		}
5512	}
5513
5514	if (send_notification) {
5515		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5516		    SATA_ADDR_DCPORT,
5517		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5518	}
5519
5520#ifdef SGPIO_SUPPORT
5521	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5522		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5523		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5524	} else {
5525		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5526		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5527	}
5528#endif
5529}
5530
5531
5532/*
5533 * Send a hotplug (add device) notification at the appropriate time after
5534 * hotplug detection.
5535 * Relies on nvp_reset_time set at a hotplug detection time.
5536 * Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
5537 * the nvp_state.
5538 */
5539static void
5540nv_delay_hotplug_notification(nv_port_t *nvp)
5541{
5542
5543	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5544	    nv_hotplug_delay) {
5545		NVLOG((NVDBG_RESET, nvp->nvp_ctlp, nvp,
5546		    "nv_delay_hotplug_notification: notifying framework after "
5547		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5548		    nvp->nvp_reset_time)));
5549		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5550		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5551		    SATA_ADDR_CPORT, 0);
5552	}
5553}
5554
5555/*
5556 * timeout processing:
5557 *
5558 * Check if any packets have crossed a timeout threshold.  If so,
5559 * abort the packet.  This function is not NCQ-aware.
5560 *
5561 * If reset was invoked, call reset monitoring function.
5562 *
5563 * Timeout frequency may be lower for checking packet timeout (1s)
5564 * and higher for reset monitoring (1ms)
5565 *
5566 */
5567static void
5568nv_timeout(void *arg)
5569{
5570	nv_port_t *nvp = arg;
5571	nv_slot_t *nv_slotp;
5572	int next_timeout = NV_ONE_SEC;	/* Default */
5573	uint16_t int_status;
5574	uint8_t status, bmstatus;
5575	static int intr_warn_once = 0;
5576
5577	ASSERT(nvp != NULL);
5578
5579	mutex_enter(&nvp->nvp_mutex);
5580	nvp->nvp_timeout_id = 0;
5581
5582	/*
5583	 * If the port is not in the init state, ignore it.
5584	 */
5585	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5586		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5587		    "nv_timeout: port uninitialized"));
5588		next_timeout = 0;
5589
5590		goto finished;
5591	}
5592
5593	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
5594		nv_monitor_reset(nvp);
5595		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5596
5597		goto finished;
5598	}
5599
5600	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
5601		nv_delay_hotplug_notification(nvp);
5602		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5603
5604		goto finished;
5605	}
5606
5607	/*
5608	 * Not yet NCQ-aware - there is only one command active.
5609	 */
5610	nv_slotp = &(nvp->nvp_slot[0]);
5611
5612	/*
5613	 * perform timeout checking and processing only if there is an
5614	 * active packet on the port
5615	 */
5616	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5617		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5618		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5619		uint8_t cmd = satacmd->satacmd_cmd_reg;
5620		uint64_t lba;
5621
5622#if ! defined(__lock_lint) && defined(DEBUG)
5623
5624		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5625		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5626		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5627		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5628		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5629		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5630#endif
5631
5632		/*
5633		 * timeout not needed if there is a polling thread
5634		 */
5635		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5636			next_timeout = 0;
5637
5638			goto finished;
5639		}
5640
5641		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5642		    spkt->satapkt_time) {
5643
5644			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5645			    nvp->nvp_serror);
5646
5647			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5648			    "nv_timeout: aborting: "
5649			    "nvslot_stime: %ld max ticks till timeout: "
5650			    "%ld cur_time: %ld cmd=%x lba=%d",
5651			    nv_slotp->nvslot_stime,
5652			    drv_usectohz(MICROSEC *
5653			    spkt->satapkt_time), ddi_get_lbolt(),
5654			    cmd, lba));
5655
5656			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5657			    "nv_timeout: SError at timeout: 0x%x", serr));
5658
5659			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5660			    "nv_timeout: previous cmd=%x",
5661			    nvp->nvp_previous_cmd));
5662
5663			if (nvp->nvp_mcp5x_int_status != NULL) {
5664				status = nv_get8(nvp->nvp_ctl_hdl,
5665				    nvp->nvp_altstatus);
5666				bmstatus = nv_get8(nvp->nvp_bm_hdl,
5667				    nvp->nvp_bmisx);
5668				int_status = nv_get16(
5669				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5670				    nvp->nvp_mcp5x_int_status);
5671				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5672				    "nv_timeout: altstatus %x, bmicx %x, "
5673				    "int_status %X", status, bmstatus,
5674				    int_status));
5675
5676				if (int_status & MCP5X_INT_COMPLETE) {
5677					/*
5678					 * Completion interrupt was missed!
5679					 * Issue warning message once
5680					 */
5681					if (!intr_warn_once) {
5682						cmn_err(CE_WARN,
5683						    "nv_sata: missing command "
5684						    "completion interrupt(s)!");
5685						intr_warn_once = 1;
5686					}
5687					NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp,
5688					    nvp, "timeout detected with "
5689					    "interrupt ready - calling "
5690					    "int directly"));
5691					mutex_exit(&nvp->nvp_mutex);
5692					(void) mcp5x_intr_port(nvp);
5693					mutex_enter(&nvp->nvp_mutex);
5694				} else {
5695					/*
5696					 * True timeout and not a missing
5697					 * interrupt.
5698					 */
5699					(void) nv_abort_active(nvp, spkt,
5700					    SATA_PKT_TIMEOUT, B_TRUE);
5701				}
5702			} else {
5703				(void) nv_abort_active(nvp, spkt,
5704				    SATA_PKT_TIMEOUT, B_TRUE);
5705			}
5706
5707		} else {
5708#ifdef NV_DEBUG
5709			if (nv_debug_flags & NVDBG_VERBOSE) {
5710				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5711				    "nv_timeout:"
5712				    " still in use so restarting timeout"));
5713			}
5714#endif
5715			next_timeout = NV_ONE_SEC;
5716		}
5717	} else {
5718		/*
5719		 * there was no active packet, so do not re-enable timeout
5720		 */
5721		next_timeout = 0;
5722#ifdef NV_DEBUG
5723		if (nv_debug_flags & NVDBG_VERBOSE) {
5724			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5725			    "nv_timeout: no active packet so not re-arming "
5726			    "timeout"));
5727		}
5728#endif
5729	}
5730
5731finished:
5732	if (next_timeout != 0) {
5733		nv_setup_timeout(nvp, next_timeout);
5734	}
5735	mutex_exit(&nvp->nvp_mutex);
5736}
5737
5738
5739/*
5740 * enable or disable the 3 interrupt types the driver is
5741 * interested in: completion, add and remove.
5742 */
5743static void
5744ck804_set_intr(nv_port_t *nvp, int flag)
5745{
5746	nv_ctl_t *nvc = nvp->nvp_ctlp;
5747	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5748	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5749	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5750	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5751	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5752	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5753
5754	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5755		int_en = nv_get8(bar5_hdl,
5756		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5757		int_en &= ~intr_bits[port];
5758		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5759		    int_en);
5760		return;
5761	}
5762
5763	ASSERT(mutex_owned(&nvp->nvp_mutex));
5764
5765	/*
5766	 * controller level lock also required since access to an 8-bit
5767	 * interrupt register is shared between both channels.
5768	 */
5769	mutex_enter(&nvc->nvc_mutex);
5770
5771	if (flag & NV_INTR_CLEAR_ALL) {
5772		NVLOG((NVDBG_INTR, nvc, nvp,
5773		    "ck804_set_intr: NV_INTR_CLEAR_ALL"));
5774
5775		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5776		    (uint8_t *)(nvc->nvc_ck804_int_status));
5777
5778		if (intr_status & clear_all_bits[port]) {
5779
5780			nv_put8(nvc->nvc_bar_hdl[5],
5781			    (uint8_t *)(nvc->nvc_ck804_int_status),
5782			    clear_all_bits[port]);
5783
5784			NVLOG((NVDBG_INTR, nvc, nvp,
5785			    "interrupt bits cleared %x",
5786			    intr_status & clear_all_bits[port]));
5787		}
5788	}
5789
5790	if (flag & NV_INTR_DISABLE) {
5791		NVLOG((NVDBG_INTR, nvc, nvp,
5792		    "ck804_set_intr: NV_INTR_DISABLE"));
5793		int_en = nv_get8(bar5_hdl,
5794		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5795		int_en &= ~intr_bits[port];
5796		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5797		    int_en);
5798	}
5799
5800	if (flag & NV_INTR_ENABLE) {
5801		NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE"));
5802		int_en = nv_get8(bar5_hdl,
5803		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5804		int_en |= intr_bits[port];
5805		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5806		    int_en);
5807	}
5808
5809	mutex_exit(&nvc->nvc_mutex);
5810}
5811
5812
5813/*
5814 * enable or disable the 3 interrupts the driver is interested in:
5815 * completion interrupt, hot add, and hot remove interrupt.
5816 */
5817static void
5818mcp5x_set_intr(nv_port_t *nvp, int flag)
5819{
5820	nv_ctl_t *nvc = nvp->nvp_ctlp;
5821	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5822	uint16_t intr_bits =
5823	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5824	uint16_t int_en;
5825
5826	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5827		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5828		int_en &= ~intr_bits;
5829		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5830		return;
5831	}
5832
5833	ASSERT(mutex_owned(&nvp->nvp_mutex));
5834
5835	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
5836
5837	if (flag & NV_INTR_CLEAR_ALL) {
5838		NVLOG((NVDBG_INTR, nvc, nvp,
5839		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
5840		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5841	}
5842
5843	if (flag & NV_INTR_ENABLE) {
5844		NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE"));
5845		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5846		int_en |= intr_bits;
5847		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5848	}
5849
5850	if (flag & NV_INTR_DISABLE) {
5851		NVLOG((NVDBG_INTR, nvc, nvp,
5852		    "mcp5x_set_intr: NV_INTR_DISABLE"));
5853		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5854		int_en &= ~intr_bits;
5855		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5856	}
5857}
5858
5859
5860static void
5861nv_resume(nv_port_t *nvp)
5862{
5863	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5864
5865	mutex_enter(&nvp->nvp_mutex);
5866
5867	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5868		mutex_exit(&nvp->nvp_mutex);
5869
5870		return;
5871	}
5872
5873	/* Enable interrupt */
5874	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5875
5876	/*
5877	 * Power may have been removed to the port and the
5878	 * drive, and/or a drive may have been added or removed.
5879	 * Force a reset which will cause a probe and re-establish
5880	 * any state needed on the drive.
5881	 */
5882	nvp->nvp_state |= NV_PORT_RESET;
5883	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5884	nv_reset(nvp);
5885
5886	mutex_exit(&nvp->nvp_mutex);
5887}
5888
5889
5890static void
5891nv_suspend(nv_port_t *nvp)
5892{
5893	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5894
5895	mutex_enter(&nvp->nvp_mutex);
5896
5897#ifdef SGPIO_SUPPORT
5898	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5899		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5900		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5901	}
5902#endif
5903
5904	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5905		mutex_exit(&nvp->nvp_mutex);
5906
5907		return;
5908	}
5909
5910	/*
5911	 * Stop the timeout handler.
5912	 * (It will be restarted in nv_reset() during nv_resume().)
5913	 */
5914	if (nvp->nvp_timeout_id) {
5915		(void) untimeout(nvp->nvp_timeout_id);
5916		nvp->nvp_timeout_id = 0;
5917	}
5918
5919	/* Disable interrupt */
5920	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5921	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5922
5923	mutex_exit(&nvp->nvp_mutex);
5924}
5925
5926
5927static void
5928nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5929{
5930	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5931	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5932	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5933	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5934	uchar_t status;
5935	struct sata_cmd_flags flags;
5936
5937	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5938	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5939	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5940
5941	if (spkt == NULL) {
5942
5943		return;
5944	}
5945
5946	/*
5947	 * in the error case, implicitly set the return of regs needed
5948	 * for error handling.
5949	 */
5950	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5951	    nvp->nvp_altstatus);
5952
5953	flags = scmd->satacmd_flags;
5954
5955	if (status & SATA_STATUS_ERR) {
5956		flags.sata_copy_out_lba_low_msb = B_TRUE;
5957		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5958		flags.sata_copy_out_lba_high_msb = B_TRUE;
5959		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5960		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5961		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5962		flags.sata_copy_out_error_reg = B_TRUE;
5963		flags.sata_copy_out_sec_count_msb = B_TRUE;
5964		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5965		scmd->satacmd_status_reg = status;
5966	}
5967
5968	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5969
5970		/*
5971		 * set HOB so that high byte will be read
5972		 */
5973		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5974
5975		/*
5976		 * get the requested high bytes
5977		 */
5978		if (flags.sata_copy_out_sec_count_msb) {
5979			scmd->satacmd_sec_count_msb =
5980			    nv_get8(cmdhdl, nvp->nvp_count);
5981		}
5982
5983		if (flags.sata_copy_out_lba_low_msb) {
5984			scmd->satacmd_lba_low_msb =
5985			    nv_get8(cmdhdl, nvp->nvp_sect);
5986		}
5987
5988		if (flags.sata_copy_out_lba_mid_msb) {
5989			scmd->satacmd_lba_mid_msb =
5990			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5991		}
5992
5993		if (flags.sata_copy_out_lba_high_msb) {
5994			scmd->satacmd_lba_high_msb =
5995			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5996		}
5997	}
5998
5999	/*
6000	 * disable HOB so that low byte is read
6001	 */
6002	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6003
6004	/*
6005	 * get the requested low bytes
6006	 */
6007	if (flags.sata_copy_out_sec_count_lsb) {
6008		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6009	}
6010
6011	if (flags.sata_copy_out_lba_low_lsb) {
6012		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6013	}
6014
6015	if (flags.sata_copy_out_lba_mid_lsb) {
6016		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6017	}
6018
6019	if (flags.sata_copy_out_lba_high_lsb) {
6020		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6021	}
6022
6023	/*
6024	 * get the device register if requested
6025	 */
6026	if (flags.sata_copy_out_device_reg) {
6027		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6028	}
6029
6030	/*
6031	 * get the error register if requested
6032	 */
6033	if (flags.sata_copy_out_error_reg) {
6034		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6035	}
6036}
6037
6038
6039/*
6040 * Hot plug and remove interrupts can occur when the device is reset.  Just
6041 * masking the interrupt doesn't always work well because if a
6042 * different interrupt arrives on the other port, the driver can still
6043 * end up checking the state of the other port and discover the hot
6044 * interrupt flag is set even though it was masked.  Checking for recent
6045 * reset activity and then ignoring turns out to be the easiest way.
6046 *
6047 * Entered with nvp mutex held.
6048 */
6049static void
6050nv_report_add_remove(nv_port_t *nvp, int flags)
6051{
6052	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6053	uint32_t sstatus;
6054	int i;
6055	clock_t nv_lbolt = ddi_get_lbolt();
6056
6057
6058	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6059	    "time (ticks) %d", nv_lbolt));
6060
6061	/*
6062	 * wait up to 1ms for sstatus to settle and reflect the true
6063	 * status of the port.  Failure to do so can create confusion
6064	 * in probe, where the incorrect sstatus value can still
6065	 * persist.
6066	 */
6067	for (i = 0; i < 1000; i++) {
6068		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6069
6070		if ((flags == NV_PORT_HOTREMOVED) &&
6071		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6072		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6073			break;
6074		}
6075
6076		if ((flags != NV_PORT_HOTREMOVED) &&
6077		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6078		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6079			break;
6080		}
6081		drv_usecwait(1);
6082	}
6083
6084	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6085	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i));
6086
6087	if (flags == NV_PORT_HOTREMOVED) {
6088
6089		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6090		    B_FALSE);
6091
6092		/*
6093		 * No device, no point of bothering with device reset
6094		 */
6095		nvp->nvp_type = SATA_DTYPE_NONE;
6096		nvp->nvp_signature = 0;
6097		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6098		    NV_PORT_RESTORE);
6099		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6100		    "nv_report_add_remove() hot removed"));
6101		nv_port_state_change(nvp,
6102		    SATA_EVNT_DEVICE_DETACHED,
6103		    SATA_ADDR_CPORT, 0);
6104
6105#ifdef SGPIO_SUPPORT
6106		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6107		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6108#endif
6109	} else {
6110		/*
6111		 * This is a hot plug or link up indication
6112		 * Now, re-check the link state - no link, no device
6113		 */
6114		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6115		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6116
6117			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6118				/*
6119				 * Real device attach - there was no device
6120				 * attached to this port before this report
6121				 */
6122				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6123				    "nv_report_add_remove() new device hot"
6124				    "plugged"));
6125				nvp->nvp_reset_time = ddi_get_lbolt();
6126				if (!(nvp->nvp_state &
6127				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6128
6129					nvp->nvp_signature = 0;
6130					if (nv_reset_after_hotplug != 0) {
6131
6132						/*
6133						 * Send reset to obtain a device
6134						 * signature
6135						 */
6136						nvp->nvp_state |=
6137						    NV_PORT_RESET |
6138						    NV_PORT_PROBE;
6139						nv_reset(nvp);
6140						NVLOG((NVDBG_HOT,
6141						    nvp->nvp_ctlp, nvp,
6142						    "nv_report_add_remove() "
6143						    "resetting device"));
6144					} else {
6145						nvp->nvp_type =
6146						    SATA_DTYPE_UNKNOWN;
6147					}
6148				}
6149
6150				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6151					if (nv_reset_after_hotplug == 0) {
6152						/*
6153						 * In case a hotplug interrupt
6154						 * is generated right after a
6155						 * link is up, delay reporting
6156						 * a hotplug event to let the
6157						 * drive to initialize and to
6158						 * send a D2H FIS with a
6159						 * signature.
6160						 * The timeout will issue an
6161						 * event notification after
6162						 * the NV_HOTPLUG_DELAY
6163						 * milliseconds delay.
6164						 */
6165						nvp->nvp_state |=
6166						    NV_PORT_HOTPLUG_DELAY;
6167						nvp->nvp_type =
6168						    SATA_DTYPE_UNKNOWN;
6169						/*
6170						 * Make sure timer is running.
6171						 */
6172						nv_setup_timeout(nvp,
6173						    NV_ONE_MSEC);
6174					} else {
6175						nv_port_state_change(nvp,
6176						    SATA_EVNT_DEVICE_ATTACHED,
6177						    SATA_ADDR_CPORT, 0);
6178					}
6179				}
6180				return;
6181			}
6182			/*
6183			 * Othervise it is a bogus attach, indicating recovered
6184			 * link loss. No real need to report it after-the-fact.
6185			 * But we may keep some statistics, or notify the
6186			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6187			 * events to keep track of such occurrences.
6188			 * Anyhow, we may want to terminate signature
6189			 * acquisition.
6190			 */
6191			NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6192			    "nv_report_add_remove() ignoring plug interrupt "
6193			    "- recovered link?"));
6194
6195			if (nvp->nvp_state &
6196			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6197				NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
6198				    "nv_report_add_remove() - "
6199				    "time since last reset %dms",
6200				    TICK_TO_MSEC(ddi_get_lbolt() -
6201				    nvp->nvp_reset_time)));
6202				/*
6203				 * If the driver does not have to wait for
6204				 * a signature, then terminate reset processing
6205				 * now.
6206				 */
6207				if (nv_wait_for_signature == 0) {
6208					NVLOG((NVDBG_RESET, nvp->nvp_ctlp,
6209					    nvp, "nv_report_add_remove() - ",
6210					    "terminating signature acquisition",
6211					    ", time after reset: %dms",
6212					    TICK_TO_MSEC(ddi_get_lbolt() -
6213					    nvp->nvp_reset_time)));
6214
6215					nvp->nvp_state &= ~(NV_PORT_RESET |
6216					    NV_PORT_RESET_RETRY);
6217
6218					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6219						nvp->nvp_state |=
6220						    NV_PORT_RESTORE;
6221						nvp->nvp_state &=
6222						    ~NV_PORT_PROBE;
6223
6224						/*
6225						 * It is not the initial device
6226						 * probing, so notify sata
6227						 * module that device was
6228						 * reset
6229						 */
6230						nv_port_state_change(nvp,
6231						    SATA_EVNT_DEVICE_RESET,
6232						    SATA_ADDR_DCPORT,
6233						    SATA_DSTATE_RESET |
6234						    SATA_DSTATE_PWR_ACTIVE);
6235					}
6236
6237				}
6238			}
6239			return;
6240		}
6241		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
6242		    "ignoring add dev interrupt - "
6243		    "link is down or no device!"));
6244	}
6245
6246}
6247
6248/*
6249 * Get request sense data and stuff it the command's sense buffer.
6250 * Start a request sense command in order to get sense data to insert
6251 * in the sata packet's rqsense buffer.  The command completion
6252 * processing is in nv_intr_pkt_pio.
6253 *
6254 * The sata framework provides a function to allocate and set-up a
6255 * request sense packet command. The reasons it is not being used here is:
6256 * a) it cannot be called in an interrupt context and this function is
6257 *    called in an interrupt context.
6258 * b) it allocates DMA resources that are not used here because this is
6259 *    implemented using PIO.
6260 *
6261 * If, in the future, this is changed to use DMA, the sata framework should
6262 * be used to allocate and set-up the error retrieval (request sense)
6263 * command.
6264 */
6265static int
6266nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6267{
6268	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6269	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6270	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6271	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6272
6273	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6274	    "nv_start_rqsense_pio: start"));
6275
6276	/* clear the local request sense buffer before starting the command */
6277	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6278
6279	/* Write the request sense PACKET command */
6280
6281	/* select the drive */
6282	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6283
6284	/* make certain the drive selected */
6285	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6286	    NV_SEC2USEC(5), 0) == B_FALSE) {
6287		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6288		    "nv_start_rqsense_pio: drive select failed"));
6289		return (NV_FAILURE);
6290	}
6291
6292	/* set up the command */
6293	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6294	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6295	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6296	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6297	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6298
6299	/* initiate the command by writing the command register last */
6300	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6301
6302	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6303	NV_DELAY_NSEC(400);
6304
6305	/*
6306	 * Wait for the device to indicate that it is ready for the command
6307	 * ATAPI protocol state - HP0: Check_Status_A
6308	 */
6309
6310	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6311	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6312	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6313	    4000000, 0) == B_FALSE) {
6314		if (nv_get8(cmdhdl, nvp->nvp_status) &
6315		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6316			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6317			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
6318		} else {
6319			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6320			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
6321		}
6322
6323		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6324		nv_complete_io(nvp, spkt, 0);
6325		nvp->nvp_state |= NV_PORT_RESET;
6326		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6327		nv_reset(nvp);
6328
6329		return (NV_FAILURE);
6330	}
6331
6332	/*
6333	 * Put the ATAPI command in the data register
6334	 * ATAPI protocol state - HP1: Send_Packet
6335	 */
6336
6337	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6338	    (ushort_t *)nvp->nvp_data,
6339	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6340
6341	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6342	    "nv_start_rqsense_pio: exiting into HP3"));
6343
6344	return (NV_SUCCESS);
6345}
6346
6347/*
6348 * quiesce(9E) entry point.
6349 *
6350 * This function is called when the system is single-threaded at high
6351 * PIL with preemption disabled. Therefore, this function must not be
6352 * blocked.
6353 *
6354 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6355 * DDI_FAILURE indicates an error condition and should almost never happen.
6356 */
6357static int
6358nv_quiesce(dev_info_t *dip)
6359{
6360	int port, instance = ddi_get_instance(dip);
6361	nv_ctl_t *nvc;
6362
6363	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6364		return (DDI_FAILURE);
6365
6366	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6367		nv_port_t *nvp = &(nvc->nvc_port[port]);
6368		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6369		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6370		uint32_t sctrl;
6371
6372		/*
6373		 * Stop the controllers from generating interrupts.
6374		 */
6375		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6376
6377		/*
6378		 * clear signature registers
6379		 */
6380		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6381		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6382		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6383		nv_put8(cmdhdl, nvp->nvp_count, 0);
6384
6385		nvp->nvp_signature = 0;
6386		nvp->nvp_type = 0;
6387		nvp->nvp_state |= NV_PORT_RESET;
6388		nvp->nvp_reset_time = ddi_get_lbolt();
6389
6390		/*
6391		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6392		 */
6393		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6394
6395		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6396		    sctrl | SCONTROL_DET_COMRESET);
6397
6398		/*
6399		 * wait 1ms
6400		 */
6401		drv_usecwait(1000);
6402
6403		/*
6404		 * de-assert reset in PHY
6405		 */
6406		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6407	}
6408
6409	return (DDI_SUCCESS);
6410}
6411
6412
6413#ifdef SGPIO_SUPPORT
6414/*
6415 * NVIDIA specific SGPIO LED support
6416 * Please refer to the NVIDIA documentation for additional details
6417 */
6418
6419/*
6420 * nv_sgp_led_init
6421 * Detect SGPIO support.  If present, initialize.
6422 */
6423static void
6424nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6425{
6426	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6427	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6428	nv_sgp_cmn_t *cmn;	/* shared data structure */
6429	int i;
6430	char tqname[SGPIO_TQ_NAME_LEN];
6431	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6432
6433	/*
6434	 * Initialize with appropriately invalid values in case this function
6435	 * exits without initializing SGPIO (for example, there is no SGPIO
6436	 * support).
6437	 */
6438	nvc->nvc_sgp_csr = 0;
6439	nvc->nvc_sgp_cbp = NULL;
6440	nvc->nvc_sgp_cmn = NULL;
6441
6442	/*
6443	 * Only try to initialize SGPIO LED support if this property
6444	 * indicates it should be.
6445	 */
6446	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6447	    "enable-sgpio-leds", 0) != 1)
6448		return;
6449
6450	/*
6451	 * CK804 can pass the sgpio_detect test even though it does not support
6452	 * SGPIO, so don't even look at a CK804.
6453	 */
6454	if (nvc->nvc_mcp5x_flag != B_TRUE)
6455		return;
6456
6457	/*
6458	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6459	 * However, the current implementation only supports 4 drives.
6460	 * With two drives per controller, that means only look at the
6461	 * first two controllers.
6462	 */
6463	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6464		return;
6465
6466	/* confirm that the SGPIO registers are there */
6467	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6468		NVLOG((NVDBG_INIT, nvc, NULL,
6469		    "SGPIO registers not detected"));
6470		return;
6471	}
6472
6473	/* save off the SGPIO_CSR I/O address */
6474	nvc->nvc_sgp_csr = csrp;
6475
6476	/* map in Control Block */
6477	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6478	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6479
6480	/* initialize the SGPIO h/w */
6481	if (nv_sgp_init(nvc) == NV_FAILURE) {
6482		nv_cmn_err(CE_WARN, nvc, NULL,
6483		    "!Unable to initialize SGPIO");
6484	}
6485
6486	/*
6487	 * Initialize the shared space for this instance.  This could
6488	 * involve allocating the space, saving a pointer to the space
6489	 * and starting the taskq that actually turns the LEDs on and off.
6490	 * Or, it could involve just getting the pointer to the already
6491	 * allocated space.
6492	 */
6493
6494	mutex_enter(&nv_sgp_c2c_mutex);
6495
6496	/* try and find our CBP in the mapping table */
6497	cmn = NULL;
6498	for (i = 0; i < NV_MAX_CBPS; i++) {
6499		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6500			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6501			break;
6502		}
6503
6504		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6505			break;
6506	}
6507
6508	if (i >= NV_MAX_CBPS) {
6509		/*
6510		 * CBP to shared space mapping table is full
6511		 */
6512		nvc->nvc_sgp_cmn = NULL;
6513		nv_cmn_err(CE_WARN, nvc, NULL,
6514		    "!LED handling not initialized - too many controllers");
6515	} else if (cmn == NULL) {
6516		/*
6517		 * Allocate the shared space, point the SGPIO scratch register
6518		 * at it and start the led update taskq.
6519		 */
6520
6521		/* allocate shared space */
6522		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6523		    KM_SLEEP);
6524		if (cmn == NULL) {
6525			nv_cmn_err(CE_WARN, nvc, NULL,
6526			    "!Failed to allocate shared data");
6527			return;
6528		}
6529
6530		nvc->nvc_sgp_cmn = cmn;
6531
6532		/* initialize the shared data structure */
6533		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6534		cmn->nvs_connected = 0;
6535		cmn->nvs_activity = 0;
6536		cmn->nvs_cbp = cbp;
6537
6538		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6539		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6540		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6541
6542		/* put the address in the SGPIO scratch register */
6543#if defined(__amd64)
6544		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6545#else
6546		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6547#endif
6548
6549		/* add an entry to the cbp to cmn mapping table */
6550
6551		/* i should be the next available table position */
6552		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6553		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6554
6555		/* start the activity LED taskq */
6556
6557		/*
6558		 * The taskq name should be unique and the time
6559		 */
6560		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6561		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6562		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6563		    TASKQ_DEFAULTPRI, 0);
6564		if (cmn->nvs_taskq == NULL) {
6565			cmn->nvs_taskq_delay = 0;
6566			nv_cmn_err(CE_WARN, nvc, NULL,
6567			    "!Failed to start activity LED taskq");
6568		} else {
6569			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6570			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6571			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6572		}
6573	} else {
6574		nvc->nvc_sgp_cmn = cmn;
6575		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6576	}
6577
6578	mutex_exit(&nv_sgp_c2c_mutex);
6579}
6580
6581/*
6582 * nv_sgp_detect
6583 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6584 * report back whether both were readable.
6585 */
6586static int
6587nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6588    uint32_t *cbpp)
6589{
6590	/* get the SGPIO_CSRP */
6591	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6592	if (*csrpp == 0) {
6593		return (NV_FAILURE);
6594	}
6595
6596	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6597	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6598	if (*cbpp == 0) {
6599		return (NV_FAILURE);
6600	}
6601
6602	/* SGPIO_CBP is good, so we must support SGPIO */
6603	return (NV_SUCCESS);
6604}
6605
6606/*
6607 * nv_sgp_init
6608 * Initialize SGPIO.
6609 * The initialization process is described by NVIDIA, but the hardware does
6610 * not always behave as documented, so several steps have been changed and/or
6611 * omitted.
6612 */
6613static int
6614nv_sgp_init(nv_ctl_t *nvc)
6615{
6616	int seq;
6617	int rval = NV_SUCCESS;
6618	hrtime_t start, end;
6619	uint32_t cmd;
6620	uint32_t status;
6621	int drive_count;
6622
6623	status = nv_sgp_csr_read(nvc);
6624	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6625		/* SGPIO logic is in reset state and requires initialization */
6626
6627		/* noting the Sequence field value */
6628		seq = SGPIO_CSR_SEQ(status);
6629
6630		/* issue SGPIO_CMD_READ_PARAMS command */
6631		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6632		nv_sgp_csr_write(nvc, cmd);
6633
6634		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6635
6636		/* poll for command completion */
6637		start = gethrtime();
6638		end = start + NV_SGP_CMD_TIMEOUT;
6639		for (;;) {
6640			status = nv_sgp_csr_read(nvc);
6641
6642			/* break on error */
6643			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6644				NVLOG((NVDBG_ALWAYS, nvc, NULL,
6645				    "Command error during initialization"));
6646				rval = NV_FAILURE;
6647				break;
6648			}
6649
6650			/* command processing is taking place */
6651			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6652				if (SGPIO_CSR_SEQ(status) != seq) {
6653					NVLOG((NVDBG_ALWAYS, nvc, NULL,
6654					    "Sequence number change error"));
6655				}
6656
6657				break;
6658			}
6659
6660			/* if completion not detected in 2000ms ... */
6661
6662			if (gethrtime() > end)
6663				break;
6664
6665			/* wait 400 ns before checking again */
6666			NV_DELAY_NSEC(400);
6667		}
6668	}
6669
6670	if (rval == NV_FAILURE)
6671		return (rval);
6672
6673	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6674		NVLOG((NVDBG_ALWAYS, nvc, NULL,
6675		    "SGPIO logic not operational after init - state %d",
6676		    SGPIO_CSR_SSTAT(status)));
6677		/*
6678		 * Should return (NV_FAILURE) but the hardware can be
6679		 * operational even if the SGPIO Status does not indicate
6680		 * this.
6681		 */
6682	}
6683
6684	/*
6685	 * NVIDIA recommends reading the supported drive count even
6686	 * though they also indicate that it is always 4 at this time.
6687	 */
6688	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6689	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6690		NVLOG((NVDBG_INIT, nvc, NULL,
6691		    "SGPIO reported undocumented drive count - %d",
6692		    drive_count));
6693	}
6694
6695	NVLOG((NVDBG_INIT, nvc, NULL,
6696	    "initialized ctlr: %d csr: 0x%08x",
6697	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr));
6698
6699	return (rval);
6700}
6701
6702static int
6703nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6704{
6705	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6706
6707	if (cmn == NULL)
6708		return (NV_FAILURE);
6709
6710	mutex_enter(&cmn->nvs_slock);
6711	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6712	mutex_exit(&cmn->nvs_slock);
6713
6714	return (NV_SUCCESS);
6715}
6716
6717/*
6718 * nv_sgp_csr_read
6719 * This is just a 32-bit port read from the value that was obtained from the
6720 * PCI config space.
6721 *
6722 * XXX It was advised to use the in[bwl] function for this, even though they
6723 * are obsolete interfaces.
6724 */
6725static int
6726nv_sgp_csr_read(nv_ctl_t *nvc)
6727{
6728	return (inl(nvc->nvc_sgp_csr));
6729}
6730
6731/*
6732 * nv_sgp_csr_write
6733 * This is just a 32-bit I/O port write.  The port number was obtained from
6734 * the PCI config space.
6735 *
6736 * XXX It was advised to use the out[bwl] function for this, even though they
6737 * are obsolete interfaces.
6738 */
6739static void
6740nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6741{
6742	outl(nvc->nvc_sgp_csr, val);
6743}
6744
6745/*
6746 * nv_sgp_write_data
6747 * Cause SGPIO to send Control Block data
6748 */
6749static int
6750nv_sgp_write_data(nv_ctl_t *nvc)
6751{
6752	hrtime_t start, end;
6753	uint32_t status;
6754	uint32_t cmd;
6755
6756	/* issue command */
6757	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6758	nv_sgp_csr_write(nvc, cmd);
6759
6760	/* poll for completion */
6761	start = gethrtime();
6762	end = start + NV_SGP_CMD_TIMEOUT;
6763	for (;;) {
6764		status = nv_sgp_csr_read(nvc);
6765
6766		/* break on error completion */
6767		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6768			break;
6769
6770		/* break on successful completion */
6771		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6772			break;
6773
6774		/* Wait 400 ns and try again */
6775		NV_DELAY_NSEC(400);
6776
6777		if (gethrtime() > end)
6778			break;
6779	}
6780
6781	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6782		return (NV_SUCCESS);
6783
6784	return (NV_FAILURE);
6785}
6786
6787/*
6788 * nv_sgp_activity_led_ctl
6789 * This is run as a taskq.  It wakes up at a fixed interval and checks to
6790 * see if any of the activity LEDs need to be changed.
6791 */
6792static void
6793nv_sgp_activity_led_ctl(void *arg)
6794{
6795	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6796	nv_sgp_cmn_t *cmn;
6797	volatile nv_sgp_cb_t *cbp;
6798	clock_t ticks;
6799	uint8_t drv_leds;
6800	uint32_t old_leds;
6801	uint32_t new_led_state;
6802	int i;
6803
6804	cmn = nvc->nvc_sgp_cmn;
6805	cbp = nvc->nvc_sgp_cbp;
6806
6807	do {
6808		/* save off the old state of all of the LEDs */
6809		old_leds = cbp->sgpio0_tr;
6810
6811		DTRACE_PROBE3(sgpio__activity__state,
6812		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6813		    int, old_leds);
6814
6815		new_led_state = 0;
6816
6817		/* for each drive */
6818		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6819
6820			/* get the current state of the LEDs for the drive */
6821			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6822
6823			if ((cmn->nvs_connected & (1 << i)) == 0) {
6824				/* if not connected, turn off activity */
6825				drv_leds &= ~TR_ACTIVE_MASK;
6826				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6827
6828				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6829				new_led_state |=
6830				    SGPIO0_TR_DRV_SET(drv_leds, i);
6831
6832				continue;
6833			}
6834
6835			if ((cmn->nvs_activity & (1 << i)) == 0) {
6836				/* connected, but not active */
6837				drv_leds &= ~TR_ACTIVE_MASK;
6838				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6839
6840				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6841				new_led_state |=
6842				    SGPIO0_TR_DRV_SET(drv_leds, i);
6843
6844				continue;
6845			}
6846
6847			/* connected and active */
6848			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6849				/* was enabled, so disable */
6850				drv_leds &= ~TR_ACTIVE_MASK;
6851				drv_leds |=
6852				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6853
6854				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6855				new_led_state |=
6856				    SGPIO0_TR_DRV_SET(drv_leds, i);
6857			} else {
6858				/* was disabled, so enable */
6859				drv_leds &= ~TR_ACTIVE_MASK;
6860				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6861
6862				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6863				new_led_state |=
6864				    SGPIO0_TR_DRV_SET(drv_leds, i);
6865			}
6866
6867			/*
6868			 * clear the activity bit
6869			 * if there is drive activity again within the
6870			 * loop interval (now 1/16 second), nvs_activity
6871			 * will be reset and the "connected and active"
6872			 * condition above will cause the LED to blink
6873			 * off and on at the loop interval rate.  The
6874			 * rate may be increased (interval shortened) as
6875			 * long as it is not more than 1/30 second.
6876			 */
6877			mutex_enter(&cmn->nvs_slock);
6878			cmn->nvs_activity &= ~(1 << i);
6879			mutex_exit(&cmn->nvs_slock);
6880		}
6881
6882		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6883
6884		/* write out LED values */
6885
6886		mutex_enter(&cmn->nvs_slock);
6887		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6888		cbp->sgpio0_tr |= new_led_state;
6889		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6890		mutex_exit(&cmn->nvs_slock);
6891
6892		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6893			NVLOG((NVDBG_VERBOSE, nvc, NULL,
6894			    "nv_sgp_write_data failure updating active LED"));
6895		}
6896
6897		/* now rest for the interval */
6898		mutex_enter(&cmn->nvs_tlock);
6899		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6900		if (ticks > 0)
6901			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6902			    ddi_get_lbolt() + ticks);
6903		mutex_exit(&cmn->nvs_tlock);
6904	} while (ticks > 0);
6905}
6906
6907/*
6908 * nv_sgp_drive_connect
6909 * Set the flag used to indicate that the drive is attached to the HBA.
6910 * Used to let the taskq know that it should turn the Activity LED on.
6911 */
6912static void
6913nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6914{
6915	nv_sgp_cmn_t *cmn;
6916
6917	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6918		return;
6919	cmn = nvc->nvc_sgp_cmn;
6920
6921	mutex_enter(&cmn->nvs_slock);
6922	cmn->nvs_connected |= (1 << drive);
6923	mutex_exit(&cmn->nvs_slock);
6924}
6925
6926/*
6927 * nv_sgp_drive_disconnect
6928 * Clears the flag used to indicate that the drive is no longer attached
6929 * to the HBA.  Used to let the taskq know that it should turn the
6930 * Activity LED off.  The flag that indicates that the drive is in use is
6931 * also cleared.
6932 */
6933static void
6934nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6935{
6936	nv_sgp_cmn_t *cmn;
6937
6938	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6939		return;
6940	cmn = nvc->nvc_sgp_cmn;
6941
6942	mutex_enter(&cmn->nvs_slock);
6943	cmn->nvs_connected &= ~(1 << drive);
6944	cmn->nvs_activity &= ~(1 << drive);
6945	mutex_exit(&cmn->nvs_slock);
6946}
6947
6948/*
6949 * nv_sgp_drive_active
6950 * Sets the flag used to indicate that the drive has been accessed and the
6951 * LED should be flicked off, then on.  It is cleared at a fixed time
6952 * interval by the LED taskq and set by the sata command start.
6953 */
6954static void
6955nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6956{
6957	nv_sgp_cmn_t *cmn;
6958
6959	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6960		return;
6961	cmn = nvc->nvc_sgp_cmn;
6962
6963	DTRACE_PROBE1(sgpio__active, int, drive);
6964
6965	mutex_enter(&cmn->nvs_slock);
6966	cmn->nvs_activity |= (1 << drive);
6967	mutex_exit(&cmn->nvs_slock);
6968}
6969
6970
6971/*
6972 * nv_sgp_locate
6973 * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
6974 * maintained in the SGPIO Control Block.
6975 */
6976static void
6977nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
6978{
6979	uint8_t leds;
6980	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
6981	nv_sgp_cmn_t *cmn;
6982
6983	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6984		return;
6985	cmn = nvc->nvc_sgp_cmn;
6986
6987	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
6988		return;
6989
6990	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
6991
6992	mutex_enter(&cmn->nvs_slock);
6993
6994	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
6995
6996	leds &= ~TR_LOCATE_MASK;
6997	leds |= TR_LOCATE_SET(value);
6998
6999	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7000	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7001
7002	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7003
7004	mutex_exit(&cmn->nvs_slock);
7005
7006	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7007		nv_cmn_err(CE_WARN, nvc, NULL,
7008		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
7009	}
7010}
7011
7012/*
7013 * nv_sgp_error
7014 * Turns the Error/Failure LED off or on for a particular drive.  State is
7015 * maintained in the SGPIO Control Block.
7016 */
7017static void
7018nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7019{
7020	uint8_t leds;
7021	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7022	nv_sgp_cmn_t *cmn;
7023
7024	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7025		return;
7026	cmn = nvc->nvc_sgp_cmn;
7027
7028	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7029		return;
7030
7031	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7032
7033	mutex_enter(&cmn->nvs_slock);
7034
7035	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7036
7037	leds &= ~TR_ERROR_MASK;
7038	leds |= TR_ERROR_SET(value);
7039
7040	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7041	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7042
7043	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7044
7045	mutex_exit(&cmn->nvs_slock);
7046
7047	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7048		nv_cmn_err(CE_WARN, nvc, NULL,
7049		    "!nv_sgp_write_data failure updating Fail/Error LED");
7050	}
7051}
7052
7053static void
7054nv_sgp_cleanup(nv_ctl_t *nvc)
7055{
7056	int drive, i;
7057	uint8_t drv_leds;
7058	uint32_t led_state;
7059	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7060	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7061	extern void psm_unmap_phys(caddr_t, size_t);
7062
7063	/*
7064	 * If the SGPIO Control Block isn't mapped or the shared data
7065	 * structure isn't present in this instance, there isn't much that
7066	 * can be cleaned up.
7067	 */
7068	if ((cb == NULL) || (cmn == NULL))
7069		return;
7070
7071	/* turn off activity LEDs for this controller */
7072	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7073
7074	/* get the existing LED state */
7075	led_state = cb->sgpio0_tr;
7076
7077	/* turn off port 0 */
7078	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7079	led_state &= SGPIO0_TR_DRV_CLR(drive);
7080	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7081
7082	/* turn off port 1 */
7083	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7084	led_state &= SGPIO0_TR_DRV_CLR(drive);
7085	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7086
7087	/* set the new led state, which should turn off this ctrl's LEDs */
7088	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7089	(void) nv_sgp_write_data(nvc);
7090
7091	/* clear the controller's in use bit */
7092	mutex_enter(&cmn->nvs_slock);
7093	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7094	mutex_exit(&cmn->nvs_slock);
7095
7096	if (cmn->nvs_in_use == 0) {
7097		/* if all "in use" bits cleared, take everything down */
7098
7099		if (cmn->nvs_taskq != NULL) {
7100			/* allow activity taskq to exit */
7101			cmn->nvs_taskq_delay = 0;
7102			cv_broadcast(&cmn->nvs_cv);
7103
7104			/* then destroy it */
7105			ddi_taskq_destroy(cmn->nvs_taskq);
7106		}
7107
7108		/* turn off all of the LEDs */
7109		cb->sgpio0_tr = 0;
7110		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7111		(void) nv_sgp_write_data(nvc);
7112
7113		cb->sgpio_sr = NULL;
7114
7115		/* zero out the CBP to cmn mapping */
7116		for (i = 0; i < NV_MAX_CBPS; i++) {
7117			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7118				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7119				break;
7120			}
7121
7122			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7123				break;
7124		}
7125
7126		/* free resources */
7127		cv_destroy(&cmn->nvs_cv);
7128		mutex_destroy(&cmn->nvs_tlock);
7129		mutex_destroy(&cmn->nvs_slock);
7130
7131		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7132	}
7133
7134	nvc->nvc_sgp_cmn = NULL;
7135
7136	/* unmap the SGPIO Control Block */
7137	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7138}
7139#endif	/* SGPIO_SUPPORT */
7140