/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/isci/isci_controller.c 233622 2012-03-28 18:38:13Z jimharris $");

#include <dev/isci/isci.h>

#include <sys/conf.h>
#include <sys/malloc.h>

#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <dev/isci/scil/sci_memory_descriptor_list.h>
#include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_task_request.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_domain.h>
#include <dev/isci/scil/scif_user_callback.h>

void isci_action(struct cam_sim *sim, union ccb *ccb);
void isci_poll(struct cam_sim *sim);

#define ccb_sim_ptr sim_priv.entries[0].ptr

/**
 * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
 *        disable interrupts, and wait for current ongoing processing to
 *        complete.  Subsequently, the user should reset the controller.
 *
 * @param[in]  controller This parameter specifies the controller that had
 *                        an error.
 *
 * @return none
 */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{

	isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
	    error);
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the start process.
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             started.
 * @param[in]  completion_status This parameter specifies the results of
 *             the start operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	uint32_t index;
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = TRUE;

	/* Set bits for all domains.  We will clear them one-by-one once
	 *  each domain completes discovery, or immediately if
	 *  scif_domain_discover returns an error.  Once all bits are clear,
	 *  we will register the controller with CAM.
	 */
	isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;

	for(index = 0; index < SCI_MAX_DOMAINS; index++) {
		SCI_STATUS status;
		SCI_DOMAIN_HANDLE_T domain =
		    isci_controller->domain[index].sci_object;

		status = scif_domain_discover(
			domain,
			scif_domain_get_suggested_discover_timeout(domain),
			DEVICE_TIMEOUT
		);

		if (status != SCI_SUCCESS)
		{
			isci_controller_domain_discovery_complete(
			    isci_controller, &isci_controller->domain[index]);
		}
	}
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the stop process.  Note that after the user calls
 *        scif_controller_stop(), and before this controller stop complete
 *        callback is received, the user should not expect any callbacks
 *        from the framework, such as scif_cb_domain_change_notification().
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             stopped.
 * @param[in]  completion_status This parameter specifies the results of
 *             the stop operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = FALSE;
}

/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{

}

/**
 * @brief This method will be invoked to free memory that was dynamically
 *        allocated.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to free memory.
 * @param[in]  mde This parameter represents the memory descriptor that
 *             references the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{

}

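/**
 * @brief This method constructs the driver's controller object.  It
 *        allocates and constructs the framework controller, associates
 *        it with this object, and initializes the domain objects, the
 *        controller lock, and the timer pool.
 *
 * @param[in]  controller This parameter specifies the controller object
 *             to construct.
 * @param[in]  isci This parameter specifies the driver instance that
 *             owns this controller.
 *
 * @return none
 */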
void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
	SCI_CONTROLLER_HANDLE_T scif_controller_handle;

	scif_library_allocate_controller(isci->sci_library_handle,
	    &scif_controller_handle);

	scif_controller_construct(isci->sci_library_handle,
	    scif_controller_handle, NULL);

	controller->isci = isci;
	controller->scif_controller_handle = scif_controller_handle;

	/* This allows us to later use
	 *  sci_object_get_association(scif_controller_handle)
	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
	 */
	sci_object_set_association(scif_controller_handle, (void *)controller);

	controller->is_started = FALSE;
	controller->is_frozen = FALSE;
	controller->sim = NULL;
	controller->initial_discovery_mask = 0;

	sci_fast_list_init(&controller->pending_device_reset_list);

	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

	uint32_t domain_index;

	for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
		isci_domain_construct( &controller->domain[domain_index],
		    domain_index, controller);
	}

	controller->timer_memory = malloc(
	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->timer_pool);

	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
	    controller->timer_memory;

	for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) {
		sci_pool_put(controller->timer_pool, timer++);
	}
}

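/**
 * @brief This method initializes the controller.  It applies the OEM and
 *        user parameters (including any loader tunables), computes the
 *        controller and SIM queue depths, registers the controller with
 *        CAM with its simq frozen, and then initializes the framework
 *        controller.
 *
 * @param[in]  controller This parameter specifies the controller to
 *             initialize.
 *
 * @return Status of the scif_controller_initialize() call.
 */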
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
	SCIC_USER_PARAMETERS_T scic_user_parameters;
	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
	unsigned long tunable;
	int i;

	scic_controller_handle =
	    scif_controller_get_scic_handle(controller->scif_controller_handle);

	if (controller->isci->oem_parameters_found == TRUE)
	{
		scic_oem_parameters_set(
		    scic_controller_handle,
		    &controller->oem_parameters,
		    (uint8_t)(controller->oem_parameters_version));
	}

	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
		scic_user_parameters.sds1.no_outbound_task_timeout =
		    (uint8_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.stp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.ssp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.stp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
		for (i = 0; i < SCI_MAX_PHYS; i++)
			scic_user_parameters.sds1.phys[i].max_speed_generation =
			    (uint8_t)tunable;

	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

	/* Scheduler bug in SCU requires SCIL to reserve some task contexts as
	 *  a workaround - one per domain.
	 */
	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
	    &controller->queue_depth)) {
		controller->queue_depth = max(1, min(controller->queue_depth,
		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
	}

	/* Reserve one request so that we can ensure we have one available TC
	 *  to do internal device resets.
	 */
	controller->sim_queue_depth = controller->queue_depth - 1;

	/* Although we save one TC to do internal device resets, it is possible
	 *  we could end up using several TCs for simultaneous device resets
	 *  while at the same time having CAM fill our controller queue.  To
	 *  simulate this condition, and test how our driver handles it, we can
	 *  set this io_shortage parameter, which will tell CAM that we have a
	 *  larger queue depth than we really do.
	 */
	uint32_t io_shortage = 0;
	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
	controller->sim_queue_depth += io_shortage;

	/* Attach to CAM using xpt_bus_register now, then immediately freeze
	 *  the simq.  It will get released later when initial domain discovery
	 *  is complete.
	 */
	controller->has_been_scanned = FALSE;
	mtx_lock(&controller->lock);
	isci_controller_attach_to_cam(controller);
	xpt_freeze_simq(controller->sim, 1);
	mtx_unlock(&controller->lock);

	return (scif_controller_initialize(controller->scif_controller_handle));
}

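/**
 * @brief This method allocates the DMA-able memory required by the
 *        framework controller, creates the DMA tag used for I/O buffers,
 *        and constructs the pools of request and remote device objects.
 *
 * @param[in]  controller This parameter specifies the controller for
 *             which to allocate memory.
 *
 * @return 0 on success, otherwise the error returned by
 *         isci_allocate_dma_buffer().
 */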
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
	int error;
	device_t device =  controller->isci->device;
	uint32_t max_segment_size = isci_io_request_get_max_io_size();
	uint32_t status = 0;
	struct ISCI_MEMORY *uncached_controller_memory =
	    &controller->uncached_controller_memory;
	struct ISCI_MEMORY *cached_controller_memory =
	    &controller->cached_controller_memory;
	struct ISCI_MEMORY *request_memory =
	    &controller->request_memory;
	POINTER_UINT virtual_address;
	bus_addr_t physical_address;

	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
	    controller->scif_controller_handle);

	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

	error = isci_allocate_dma_buffer(device, uncached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory( controller->mdl,
	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    uncached_controller_memory->virtual_address,
	    uncached_controller_memory->physical_address);

	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	error = isci_allocate_dma_buffer(device, cached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    cached_controller_memory->virtual_address,
	    cached_controller_memory->physical_address);

	request_memory->size =
	    controller->queue_depth * isci_io_request_get_object_size();

	error = isci_allocate_dma_buffer(device, request_memory);

	if (error != 0)
	    return (error);

	/* For STP PIO testing, we want to ensure we can force multiple SGLs
	 *  since this has been a problem area in SCIL.  This tunable parameter
	 *  will allow us to force DMA segments to a smaller size, ensuring
	 *  that even if a physically contiguous buffer is attached to this
	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
	 *  load callback.
	 */
	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);

	/* Create DMA tag for our I/O requests.  Then we can create DMA maps based off
	 *  of this tag and store them in each of our ISCI_IO_REQUEST objects.  This
	 *  will enable better performance than creating the DMA maps every time we get
	 *  an I/O.
	 */
	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    isci_io_request_get_max_io_size(),
	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
	    &controller->buffer_dma_tag);

	sci_pool_initialize(controller->request_pool);

	virtual_address = request_memory->virtual_address;
	physical_address = request_memory->physical_address;

	for (int i = 0; i < controller->queue_depth; i++) {
		struct ISCI_REQUEST *request =
		    (struct ISCI_REQUEST *)virtual_address;

		isci_request_construct(request,
		    controller->scif_controller_handle,
		    controller->buffer_dma_tag, physical_address);

		sci_pool_put(controller->request_pool, request);

		virtual_address += isci_request_get_object_size();
		physical_address += isci_request_get_object_size();
	}

	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
	    scif_remote_device_get_object_size();

	controller->remote_device_memory = (uint8_t *) malloc(
	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->remote_device_pool);

	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct ISCI_REMOTE_DEVICE *remote_device =
		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

		controller->remote_device[i] = NULL;
		remote_device->index = i;
		remote_device->is_resetting = FALSE;
		remote_device->frozen_lun_mask = 0;
		sci_fast_list_element_init(remote_device,
		    &remote_device->pending_device_reset_element);

		/*
		 * For the first SCI_MAX_DOMAINS device objects, do not put
		 *  them in the pool, rather assign them to each domain.  This
		 *  ensures that any device attached directly to port "i" will
		 *  always get CAM target id "i".
		 */
		if (i < SCI_MAX_DOMAINS)
			controller->domain[i].da_remote_device = remote_device;
		else
			sci_pool_put(controller->remote_device_pool,
			    remote_device);
		remote_device_memory_ptr += remote_device_size;
	}

	return (0);
}

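/**
 * @brief This method starts the framework controller using its suggested
 *        start timeout and enables interrupts on the associated core
 *        controller.
 *
 * @param[in]  controller_handle This parameter specifies the controller
 *             to start.
 *
 * @return none
 */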
void isci_controller_start(void *controller_handle)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)controller_handle;
	SCI_CONTROLLER_HANDLE_T scif_controller_handle =
	    controller->scif_controller_handle;

	scif_controller_start(scif_controller_handle,
	    scif_controller_get_suggested_start_timeout(scif_controller_handle));

	scic_controller_enable_interrupts(
	    scif_controller_get_scic_handle(controller->scif_controller_handle));
}

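/**
 * @brief This method is invoked when a domain has completed its initial
 *        discovery.  Once every domain on the controller has finished,
 *        it releases the simq so the initial scan can proceed, then
 *        either starts the next controller or disestablishes the config
 *        hook when all controllers have completed discovery.
 *
 * @param[in]  isci_controller This parameter specifies the controller
 *             that owns the domain.
 * @param[in]  isci_domain This parameter specifies the domain that
 *             completed discovery.
 *
 * @return none
 */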
void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
	if (!isci_controller->has_been_scanned)
	{
		/* Controller has not been scanned yet.  We'll clear
		 *  the discovery bit for this domain, then check if all bits
		 *  are now clear.  That would indicate that all domains are
		 *  done with discovery and we can then proceed with initial
		 *  scan.
		 */

		isci_controller->initial_discovery_mask &=
		    ~(1 << isci_domain->index);

		if (isci_controller->initial_discovery_mask == 0) {
			struct isci_softc *driver = isci_controller->isci;
			uint8_t next_index = isci_controller->index + 1;

			isci_controller->has_been_scanned = TRUE;

			/* Unfreeze simq to allow initial scan to proceed. */
			xpt_release_simq(isci_controller->sim, TRUE);

#if __FreeBSD_version < 800000
			/* When driver is loaded after boot, we need to
			 *  explicitly rescan here for versions <8.0, because
			 *  CAM only automatically scans new buses at boot
			 *  time.
			 */
			union ccb *ccb = xpt_alloc_ccb_nowait();

			xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(isci_controller->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

			xpt_rescan(ccb);
#endif

			if (next_index < driver->controller_count) {
				/*  There are more controllers that need to
				 *   start.  So start the next one.
				 */
				isci_controller_start(
				    &driver->controllers[next_index]);
			}
			else
			{
				/* All controllers have been started and completed discovery.
				 *  Disestablish the config hook which will signal to the
				 *  kernel during boot that it is safe to try to find and
				 *  mount the root partition.
				 */
				config_intrhook_disestablish(
				    &driver->config_hook);
			}
		}
	}
}

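/**
 * @brief This method registers the controller with CAM.  It allocates
 *        the device queue and SIM, registers the SIM on the parent bus,
 *        and creates a wildcard path for the controller.
 *
 * @param[in]  controller This parameter specifies the controller to
 *             register.
 *
 * @return 0 on success, -1 on failure.
 */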
int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
	struct isci_softc *isci = controller->isci;
	device_t parent = device_get_parent(isci->device);
	int unit = device_get_unit(isci->device);
	struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

	if(isci_devq == NULL) {
		isci_log_message(0, "ISCI", "isci_devq is NULL \n");
		return (-1);
	}

	controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
	    controller, unit, &controller->lock, controller->sim_queue_depth,
	    controller->sim_queue_depth, isci_devq);

	if(controller->sim == NULL) {
		isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n");
		cam_simq_free(isci_devq);
		return (-1);
	}

	if(xpt_bus_register(controller->sim, parent, controller->index)
	    != CAM_SUCCESS) {
		isci_log_message(0, "ISCI", "xpt_bus_register...fails \n");
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	if(xpt_create_path(&controller->path, NULL,
	    cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		isci_log_message(0, "ISCI", "xpt_create_path....fails\n");
		xpt_bus_deregister(cam_sim_path(controller->sim));
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	return (0);
}

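/**
 * @brief This method is the CAM poll entry point for the SIM.  It runs
 *        the driver's interrupt handler in polled mode.
 *
 * @param[in]  sim This parameter specifies the SIM to poll.
 *
 * @return none
 */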
void isci_poll(struct cam_sim *sim)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	isci_interrupt_poll_handler(controller);
}

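/**
 * @brief This method is the CAM action entry point for the SIM.  It
 *        dispatches incoming CCBs (path inquiry, transport settings,
 *        SCSI and SMP I/O, device reset, and related requests) to the
 *        appropriate handlers.
 *
 * @param[in]  sim This parameter specifies the SIM servicing the CCB.
 * @param[in]  ccb This parameter specifies the CCB to process.
 *
 * @return none
 */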
void isci_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	switch ( ccb->ccb_h.func_code ) {
	case XPT_PATH_INQ:
		{
			struct ccb_pathinq *cpi = &ccb->cpi;
			int bus = cam_sim_bus(sim);
			ccb->ccb_h.ccb_sim_ptr = sim;
			cpi->version_num = 1;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->target_sprt = 0;
			cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
			cpi->hba_eng_cnt = 0;
			cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
			cpi->max_lun = ISCI_MAX_LUN;
#if __FreeBSD_version >= 800102
			cpi->maxio = isci_io_request_get_max_io_size();
#endif
			cpi->unit_number = cam_sim_unit(sim);
			cpi->bus_id = bus;
			cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
			cpi->base_transfer_speed = 300000;
			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
			strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol = PROTO_SCSI;
			cpi->protocol_version = SCSI_REV_SPC2;
			cpi->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
		}
		break;
	case XPT_GET_TRAN_SETTINGS:
		{
			struct ccb_trans_settings *general_settings = &ccb->cts;
			struct ccb_trans_settings_sas *sas_settings =
			    &general_settings->xport_specific.sas;
			struct ccb_trans_settings_scsi *scsi_settings =
			    &general_settings->proto_specific.scsi;
			struct ISCI_REMOTE_DEVICE *remote_device;

			remote_device = controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device == NULL) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
				break;
			}

			general_settings->protocol = PROTO_SCSI;
			general_settings->transport = XPORT_SAS;
			general_settings->protocol_version = SCSI_REV_SPC2;
			general_settings->transport_version = 0;
			scsi_settings->valid = CTS_SCSI_VALID_TQ;
			scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;

			sas_settings->bitrate =
			    isci_remote_device_get_bitrate(remote_device);

			if (sas_settings->bitrate != 0)
				sas_settings->valid = CTS_SAS_VALID_SPEED;

			xpt_done(ccb);
		}
		break;
	case XPT_SCSI_IO:
		isci_io_request_execute_scsi_io(ccb, controller);
		break;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		isci_io_request_execute_smp_io(ccb, controller);
		break;
#endif
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	case XPT_RESET_DEV:
		{
			struct ISCI_REMOTE_DEVICE *remote_device =
			    controller->remote_device[ccb->ccb_h.target_id];

			if (remote_device != NULL)
				isci_remote_device_reset(remote_device, ccb);
			else {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			}
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	default:
		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}