/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isci/isci_controller.c 235751 2012-05-21 22:54:33Z jimharris $");

#include <dev/isci/isci.h>

#include <sys/conf.h>
#include <sys/malloc.h>

#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <dev/isci/scil/sci_memory_descriptor_list.h>
#include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_task_request.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_domain.h>
#include <dev/isci/scil/scif_user_callback.h>

void isci_action(struct cam_sim *sim, union ccb *ccb);
void isci_poll(struct cam_sim *sim);

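/* Alias for the first SIM-private pointer in a CCB header; isci_action()
 *  stores the owning sim there when servicing XPT_PATH_INQ.
 */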
#define ccb_sim_ptr sim_priv.entries[0].ptr

/**
 * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
 *        disable interrupts, and wait for current ongoing processing to
 *        complete.  Subsequently, the user should reset the controller.
 *
 * @param[in]  controller This parameter specifies the controller that had
 *                        an error.
 *
 * @return none
 */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{

	isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
	    error);
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the start process.
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             started.
 * @param[in]  completion_status This parameter specifies the results of
 *             the start operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	uint32_t index;
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = TRUE;

	/* Set bits for all domains.  We will clear them one-by-one, once
	 *  each domain completes discovery or if scif_domain_discover
	 *  returns an error.  Once all bits are clear, we will register
	 *  the controller with CAM.
	 */
	isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;

	for(index = 0; index < SCI_MAX_DOMAINS; index++) {
		SCI_STATUS status;
		SCI_DOMAIN_HANDLE_T domain =
		    isci_controller->domain[index].sci_object;

		status = scif_domain_discover(
			domain,
			scif_domain_get_suggested_discover_timeout(domain),
			DEVICE_TIMEOUT
		);

		if (status != SCI_SUCCESS)
		{
			isci_controller_domain_discovery_complete(
			    isci_controller, &isci_controller->domain[index]);
		}
	}
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the stop process.  Note that after the user calls
 *        scif_controller_stop(), and before this controller stop complete
 *        callback is received, the user should not expect any callbacks
 *        from the framework, such as scif_cb_domain_change_notification().
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             stopped.
 * @param[in]  completion_status This parameter specifies the results of
 *             the stop operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = FALSE;
}

/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{

}

/**
 * @brief This method will be invoked to free memory that was allocated
 *        dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to free memory.
 * @param[in]  mde This parameter represents the memory descriptor
 *             referencing the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{

}

void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
	SCI_CONTROLLER_HANDLE_T scif_controller_handle;

	scif_library_allocate_controller(isci->sci_library_handle,
	    &scif_controller_handle);

	scif_controller_construct(isci->sci_library_handle,
	    scif_controller_handle, NULL);

	controller->isci = isci;
	controller->scif_controller_handle = scif_controller_handle;

	/* This allows us to later use
	 *  sci_object_get_association(scif_controller_handle)
	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
	 */
	sci_object_set_association(scif_controller_handle, (void *)controller);

	controller->is_started = FALSE;
	controller->is_frozen = FALSE;
	controller->release_queued_ccbs = FALSE;
	controller->sim = NULL;
	controller->initial_discovery_mask = 0;

	sci_fast_list_init(&controller->pending_device_reset_list);

	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

	uint32_t domain_index;

	for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
		isci_domain_construct(&controller->domain[domain_index],
		    domain_index, controller);
	}

	controller->timer_memory = malloc(
	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
	    M_NOWAIT | M_ZERO);

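	/* Carve the preallocated timer memory into individual timer objects
	 *  and seed the free timer pool with all of them.
	 */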
	sci_pool_initialize(controller->timer_pool);

	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
	    controller->timer_memory;

	for (int i = 0; i < SCI_MAX_TIMERS; i++) {
		sci_pool_put(controller->timer_pool, timer++);
	}
}

SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
	SCIC_USER_PARAMETERS_T scic_user_parameters;
	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
	unsigned long tunable;
	int i;

	scic_controller_handle =
	    scif_controller_get_scic_handle(controller->scif_controller_handle);

	if (controller->isci->oem_parameters_found == TRUE)
	{
		scic_oem_parameters_set(
		    scic_controller_handle,
		    &controller->oem_parameters,
		    (uint8_t)(controller->oem_parameters_version));
	}

	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

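	/* Allow loader(8) tunables (hw.isci.* settings, typically from
	 *  loader.conf) to override the default SCIC user parameters before
	 *  they are committed below.
	 */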
	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
		scic_user_parameters.sds1.no_outbound_task_timeout =
		    (uint8_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.stp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.ssp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.stp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
		for (i = 0; i < SCI_MAX_PHYS; i++)
			scic_user_parameters.sds1.phys[i].max_speed_generation =
			    (uint8_t)tunable;

	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

	/* A scheduler bug in the SCU requires SCIL to reserve one task
	 *  context per domain as a workaround.
	 */
	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
	    &controller->queue_depth)) {
		controller->queue_depth = max(1, min(controller->queue_depth,
		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
	}

	/* Reserve one request so that we can ensure we have one available TC
	 *  to do internal device resets.
	 */
	controller->sim_queue_depth = controller->queue_depth - 1;

	/* Although we save one TC to do internal device resets, it is possible
	 *  we could end up using several TCs for simultaneous device resets
	 *  while at the same time having CAM fill our controller queue.  To
	 *  simulate this condition, and how our driver handles it, we can set
	 *  this io_shortage parameter, which will tell CAM that we have a
	 *  larger queue depth than we really do.
	 */
	uint32_t io_shortage = 0;
	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
	controller->sim_queue_depth += io_shortage;

	/* Attach to CAM using xpt_bus_register now, then immediately freeze
	 *  the simq.  It will get released later when initial domain discovery
	 *  is complete.
	 */
	controller->has_been_scanned = FALSE;
	mtx_lock(&controller->lock);
	isci_controller_attach_to_cam(controller);
	xpt_freeze_simq(controller->sim, 1);
	mtx_unlock(&controller->lock);

	return (scif_controller_initialize(controller->scif_controller_handle));
}

int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
	int error;
	device_t device = controller->isci->device;
	uint32_t max_segment_size = isci_io_request_get_max_io_size();
	uint32_t status = 0;
	struct ISCI_MEMORY *uncached_controller_memory =
	    &controller->uncached_controller_memory;
	struct ISCI_MEMORY *cached_controller_memory =
	    &controller->cached_controller_memory;
	struct ISCI_MEMORY *request_memory =
	    &controller->request_memory;
	POINTER_UINT virtual_address;
	bus_addr_t physical_address;

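	/* The memory descriptor list (MDL) enumerates the buffers SCIL
	 *  requires; satisfy it with one physically contiguous uncached
	 *  allocation and one cached allocation, handed back through the MDL
	 *  decorator.
	 */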
	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
	    controller->scif_controller_handle);

	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

	error = isci_allocate_dma_buffer(device, uncached_controller_memory);

	if (error != 0)
		return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    uncached_controller_memory->virtual_address,
	    uncached_controller_memory->physical_address);

	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	error = isci_allocate_dma_buffer(device, cached_controller_memory);

	if (error != 0)
		return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    cached_controller_memory->virtual_address,
	    cached_controller_memory->physical_address);

	request_memory->size =
	    controller->queue_depth * isci_io_request_get_object_size();

	error = isci_allocate_dma_buffer(device, request_memory);

	if (error != 0)
		return (error);

	/* For STP PIO testing, we want to ensure we can force multiple SGLs
	 *  since this has been a problem area in SCIL.  This tunable parameter
	 *  will allow us to force DMA segments to a smaller size, ensuring
	 *  that even if a physically contiguous buffer is attached to this
	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
	 *  load callback.
	 */
	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);

	/* Create a DMA tag for our I/O requests.  We can then create DMA maps
	 *  based off of this tag and store them in each of our ISCI_IO_REQUEST
	 *  objects.  This enables better performance than creating the DMA
	 *  maps every time we get an I/O.
	 */
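	/* Tag parameters: single-byte alignment, no boundary restriction,
	 *  the full bus address range, a maximum I/O size taken from the
	 *  controller, and at most SCI_MAX_SCATTER_GATHER_ELEMENTS segments
	 *  of max_segment_size bytes each.
	 */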
	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    isci_io_request_get_max_io_size(),
	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
	    &controller->buffer_dma_tag);

	sci_pool_initialize(controller->request_pool);

	virtual_address = request_memory->virtual_address;
	physical_address = request_memory->physical_address;

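	/* Walk the contiguous request buffer, constructing one ISCI_REQUEST
	 *  per queue slot and placing it in the free request pool.
	 */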
	for (int i = 0; i < controller->queue_depth; i++) {
		struct ISCI_REQUEST *request =
		    (struct ISCI_REQUEST *)virtual_address;

		isci_request_construct(request,
		    controller->scif_controller_handle,
		    controller->buffer_dma_tag, physical_address);

		sci_pool_put(controller->request_pool, request);

		virtual_address += isci_request_get_object_size();
		physical_address += isci_request_get_object_size();
	}

	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
	    scif_remote_device_get_object_size();

	controller->remote_device_memory = (uint8_t *) malloc(
	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->remote_device_pool);

	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct ISCI_REMOTE_DEVICE *remote_device =
		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

		controller->remote_device[i] = NULL;
		remote_device->index = i;
		remote_device->is_resetting = FALSE;
		remote_device->frozen_lun_mask = 0;
		sci_fast_list_element_init(remote_device,
		    &remote_device->pending_device_reset_element);
		TAILQ_INIT(&remote_device->queued_ccbs);
		remote_device->release_queued_ccb = FALSE;
		remote_device->queued_ccb_in_progress = NULL;

		/*
		 * Do not put the first SCI_MAX_DOMAINS device objects in the
		 *  pool; rather, assign one to each domain.  This ensures
		 *  that any device attached directly to port "i" will always
		 *  get CAM target id "i".
		 */
		if (i < SCI_MAX_DOMAINS)
			controller->domain[i].da_remote_device = remote_device;
		else
			sci_pool_put(controller->remote_device_pool,
			    remote_device);
		remote_device_memory_ptr += remote_device_size;
	}

	return (0);
}

void isci_controller_start(void *controller_handle)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)controller_handle;
	SCI_CONTROLLER_HANDLE_T scif_controller_handle =
	    controller->scif_controller_handle;

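	/* The start is asynchronous; completion is reported through
	 *  scif_cb_controller_start_complete(), which kicks off domain
	 *  discovery.
	 */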
	scif_controller_start(scif_controller_handle,
	    scif_controller_get_suggested_start_timeout(scif_controller_handle));

	scic_controller_enable_interrupts(
	    scif_controller_get_scic_handle(controller->scif_controller_handle));
}

void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
	if (!isci_controller->has_been_scanned)
	{
		/* Controller has not been scanned yet.  We'll clear
		 *  the discovery bit for this domain, then check if all bits
		 *  are now clear.  That would indicate that all domains are
		 *  done with discovery and we can then proceed with initial
		 *  scan.
		 */

		isci_controller->initial_discovery_mask &=
		    ~(1 << isci_domain->index);

		if (isci_controller->initial_discovery_mask == 0) {
			struct isci_softc *driver = isci_controller->isci;
			uint8_t next_index = isci_controller->index + 1;

			isci_controller->has_been_scanned = TRUE;

			/* Unfreeze simq to allow initial scan to proceed. */
			xpt_release_simq(isci_controller->sim, TRUE);

#if __FreeBSD_version < 800000
			/* When the driver is loaded after boot, we need to
			 *  explicitly rescan here for versions <8.0, because
			 *  CAM only automatically scans new buses at boot
			 *  time.
			 */
			union ccb *ccb = xpt_alloc_ccb_nowait();

			xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(isci_controller->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

			xpt_rescan(ccb);
#endif

			if (next_index < driver->controller_count) {
				/* There are more controllers that need to
				 *  start.  So start the next one.
				 */
				isci_controller_start(
				    &driver->controllers[next_index]);
			}
			else
			{
				/* All controllers have been started and have
				 *  completed discovery.  Disestablish the
				 *  config hook, which signals to the kernel
				 *  during boot that it is safe to try to find
				 *  and mount the root partition.
				 */
				config_intrhook_disestablish(
				    &driver->config_hook);
			}
		}
	}
}

int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
	struct isci_softc *isci = controller->isci;
	device_t parent = device_get_parent(isci->device);
	int unit = device_get_unit(isci->device);
	struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

	if(isci_devq == NULL) {
		isci_log_message(0, "ISCI", "isci_devq is NULL\n");
		return (-1);
	}

	controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
	    controller, unit, &controller->lock, controller->sim_queue_depth,
	    controller->sim_queue_depth, isci_devq);

	if(controller->sim == NULL) {
		isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n");
		cam_simq_free(isci_devq);
		return (-1);
	}

	if(xpt_bus_register(controller->sim, parent, controller->index)
	    != CAM_SUCCESS) {
		isci_log_message(0, "ISCI", "xpt_bus_register...fails\n");
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	if(xpt_create_path(&controller->path, NULL,
	    cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		isci_log_message(0, "ISCI", "xpt_create_path....fails\n");
		xpt_bus_deregister(cam_sim_path(controller->sim));
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	return (0);
}

void isci_poll(struct cam_sim *sim)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	isci_interrupt_poll_handler(controller);
}

void isci_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
		{
			struct ccb_pathinq *cpi = &ccb->cpi;
			int bus = cam_sim_bus(sim);
			ccb->ccb_h.ccb_sim_ptr = sim;
			cpi->version_num = 1;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->target_sprt = 0;
			cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
			cpi->hba_eng_cnt = 0;
			cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
			cpi->max_lun = ISCI_MAX_LUN;
#if __FreeBSD_version >= 800102
			cpi->maxio = isci_io_request_get_max_io_size();
#endif
			cpi->unit_number = cam_sim_unit(sim);
			cpi->bus_id = bus;
			cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
			cpi->base_transfer_speed = 300000;
			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
			strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol = PROTO_SCSI;
			cpi->protocol_version = SCSI_REV_SPC2;
			cpi->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
		}
		break;
	case XPT_GET_TRAN_SETTINGS:
		{
			struct ccb_trans_settings *general_settings = &ccb->cts;
			struct ccb_trans_settings_sas *sas_settings =
			    &general_settings->xport_specific.sas;
			struct ccb_trans_settings_scsi *scsi_settings =
			    &general_settings->proto_specific.scsi;
			struct ISCI_REMOTE_DEVICE *remote_device;

			remote_device = controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device == NULL) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
				break;
			}

			general_settings->protocol = PROTO_SCSI;
			general_settings->transport = XPORT_SAS;
			general_settings->protocol_version = SCSI_REV_SPC2;
			general_settings->transport_version = 0;
			scsi_settings->valid = CTS_SCSI_VALID_TQ;
			scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;

			sas_settings->bitrate =
			    isci_remote_device_get_bitrate(remote_device);

			if (sas_settings->bitrate != 0)
				sas_settings->valid = CTS_SAS_VALID_SPEED;

			xpt_done(ccb);
		}
		break;
	case XPT_SCSI_IO:
		isci_io_request_execute_scsi_io(ccb, controller);
		break;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		isci_io_request_execute_smp_io(ccb, controller);
		break;
#endif
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
		{
			struct ISCI_REMOTE_DEVICE *remote_device =
			    controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device != NULL)
				isci_remote_device_reset(remote_device, ccb);
			else {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			}
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	default:
		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
 *  CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
 *  when SCIL denotes an I/O needs to be retried (typically because of mixing
 *  tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
 *  these I/Os internally.  Once SCIL completes an I/O to this device, or we
 *  get a ready notification, we will retry the first I/O on the queue.
 *  Unfortunately, SCIL also doesn't cleanly handle starting the new I/O
 *  within the context of the completion handler, so we need to retry these
 *  I/Os after the completion handler is done executing.
 */
void
isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
	struct ISCI_REMOTE_DEVICE *dev;
	struct ccb_hdr *ccb_h;
	int dev_idx;

	KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));

	controller->release_queued_ccbs = FALSE;
	for (dev_idx = 0;
	     dev_idx < SCI_MAX_REMOTE_DEVICES;
	     dev_idx++) {

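		/* Only the head CCB is retried here; subsequent queued CCBs
		 *  are released one at a time as each retried I/O completes.
		 */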
		dev = controller->remote_device[dev_idx];
		if (dev != NULL &&
		    dev->release_queued_ccb == TRUE &&
		    dev->queued_ccb_in_progress == NULL) {
			dev->release_queued_ccb = FALSE;
			ccb_h = TAILQ_FIRST(&dev->queued_ccbs);

			if (ccb_h == NULL)
				continue;

			isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
			    ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);

			dev->queued_ccb_in_progress = (union ccb *)ccb_h;
			isci_io_request_execute_scsi_io(
			    (union ccb *)ccb_h, controller);
		}
	}
}