/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2024, Broadcom Inc. All rights reserved.
 * Support: <fbsd-storage-driver.pdl@broadcom.com>
 *
 * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
 *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation and/or other
 *    materials provided with the distribution.
 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
 *
 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/smp_all.h>

#include <dev/nvme/nvme.h>
#include "mpi/mpi30_api.h"
#include "mpi3mr_cam.h"
#include "mpi3mr.h"
#include <sys/time.h>			/* XXX for pcpu.h */
#include <sys/pcpu.h>			/* XXX for PCPU_GET */

#define	smp_processor_id()  PCPU_GET(cpuid)
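
/*
 * smp_processor_id() mirrors the Linux helper of the same name so that logic
 * shared with the Linux mpi3mr driver can stay close to its original form;
 * on FreeBSD it simply resolves to the current CPU id via PCPU_GET(cpuid).
 */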

static void
mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
static void
mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc);

extern int
mpi3mr_register_events(struct mpi3mr_softc *sc);
extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
    bus_addr_t dma_addr);

static U32 event_count;

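/*
 * mpi3mr_divert_ws - divert large WRITE SAME commands to firmware.
 *
 * For WRITE SAME(16) and WRITE SAME(32) with both UNMAP and NDOB set, the
 * request is flagged for firmware handling when the block count exceeds the
 * device's advertised WRITE SAME length.  A rough sketch of the CDB fields
 * consulted below (offsets per SBC; shown for orientation only):
 *
 *   WRITE SAME(16): byte 1 = flags (UNMAP 0x08, NDOB 0x01),
 *                   bytes 10..13 = number of blocks
 *   WRITE SAME(32): byte 10 = flags, bytes 28..31 = number of blocks
 */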
static inline void mpi3mr_divert_ws(Mpi3SCSIIORequest_t *req,
				    struct ccb_scsiio *csio,
				    U16 ws_len)
{
	U8 unmap = 0, ndob = 0;
	U32 num_blocks = 0;
	U8 opcode = scsiio_cdb_ptr(csio)[0];
	U16 service_action = ((scsiio_cdb_ptr(csio)[8] << 8) | scsiio_cdb_ptr(csio)[9]);

	if (opcode == WRITE_SAME_16 ||
	   (opcode == VARIABLE_LEN_CDB &&
	    service_action == WRITE_SAME_32)) {

		int unmap_ndob_index = (opcode == WRITE_SAME_16) ? 1 : 10;

		unmap = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x08;
		ndob = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x01;
		num_blocks = scsi_4btoul(scsiio_cdb_ptr(csio) +
					 ((opcode == WRITE_SAME_16) ? 10 : 28));

		/* Check conditions for diversion to firmware */
		if (unmap && ndob && num_blocks > ws_len) {
			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
			req->Flags = htole32(le32toh(req->Flags) |
					     MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE);
		}
	}
}

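/*
 * mpi3mr_prepare_sgls - bus_dma callback that builds the MPI3 SGL.
 *
 * The request frame has room for only a limited number of simple SGEs; when
 * the I/O needs more, the tail of the list lives in a per-command chain
 * buffer and the last in-frame element becomes a LAST_CHAIN SGE pointing at
 * it, roughly:
 *
 *   request frame: [SIMPLE] ... [SIMPLE] [LAST_CHAIN --> chain buffer]
 *   chain buffer:  [SIMPLE] ... [SIMPLE | END_OF_LIST]
 *
 * (Illustrative layout only; the exact in-frame count comes from the
 * controller's reported request-frame size below.)
 */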
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;
	union ccb *ccb;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;
	ccb = cm->ccb;

	if (error) {
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n", __func__, error);
		if (error == EFBIG) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
		} else {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		}
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);

	KASSERT(nsegs <= MPI3MR_SG_DEPTH && nsegs > 0,
	    ("%s: bad SGE count: %d\n", device_get_nameunit(sc->mpi3mr_dev), nsegs));
	KASSERT(scsiio_req->DataLength != 0,
	    ("%s: Data segments (%d), but DataLength == 0\n",
		device_get_nameunit(sc->mpi3mr_dev), nsegs));

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	sges_left = nsegs;

	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	/*
	 * Now that we've created the sgls, we send the request to the device.
	 * Unlike in Linux, dmaload isn't guaranteed to load every time, but
	 * this function is always called when the resources are available, so
	 * we can send the request to hardware here always. mpi3mr_map_request
	 * knows about this quirk and will only take evasive action when an
	 * error other than EINPROGRESS is returned from dmaload.
	 */
	mpi3mr_enqueue_request(sc, cm);

	return;
}

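/*
 * mpi3mr_map_request - DMA-map a command's data and hand it to the SGL
 * builder.  bus_dmamap_load_ccb() may complete synchronously or defer the
 * callback (EINPROGRESS) until mapping resources free up; in both of those
 * cases mpi3mr_prepare_sgls() finishes the submission, so only other errors
 * need to be mopped up here.
 */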
static void
mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
{
	u_int32_t retcode = 0;
	union ccb *ccb;

	ccb = cm->ccb;
	if (cm->data != NULL) {
		mtx_lock(&sc->io_lock);
		/* Map data buffer into bus space */
		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
		    ccb, mpi3mr_prepare_sgls, cm, 0);
		mtx_unlock(&sc->io_lock);
		if (retcode != 0 && retcode != EINPROGRESS) {
			device_printf(sc->mpi3mr_dev,
			    "bus_dmamap_load(): retcode = %d\n", retcode);
			/*
			 * Any other error means prepare_sgls wasn't called, and
			 * will never be called, so we have to mop up. This error
			 * should never happen, though.
			 */
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
		}
	} else {
		/*
		 * No data, we enqueue it directly here.
		 */
		mpi3mr_enqueue_request(sc, cm);
	}
}

void
mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	if (cmd->data != NULL) {
		if (cmd->data_dir == MPI3MR_READ)
			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
		if (cmd->data_dir == MPI3MR_WRITE)
			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
		mtx_unlock(&sc->io_lock);
	}
}

/**
 * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
 * @sc: Adapter instance reference
 * @ccb: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives; this routine checks for those, and when one is
 * found it completes the SCSI command with the appropriate status
 * and sense data and returns false.
 *
 * Return: TRUE for allowed unmap, FALSE otherwise.
 */
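/*
 * For reference (per SBC; noted here only to make the checks below easier to
 * follow): the UNMAP CDB carries the parameter list length in bytes 7..8,
 * and the parameter list itself starts with an 8-byte header whose bytes
 * 2..3 hold the block descriptor data length, followed by 16-byte block
 * descriptors.  Hence the minimum meaningful list is 24 bytes (8-byte
 * header plus one descriptor).
 */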
static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
	union ccb *ccb)
{
	struct ccb_scsiio *csio;
	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;

	csio = &ccb->csio;
	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);

	switch(pci_get_revid(sc->mpi3mr_dev)) {
	case SAS4116_CHIP_REV_A0:
		if (!param_list_len) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with zero parameter length\n",
			    __func__);
			mpi3mr_print_cdb(ccb);
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return false;
		}

		if (param_list_len < 24) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with invalid param_list_len: %d\n",
			    __func__, param_list_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
				/*sense_format*/ SSD_TYPE_FIXED,
				/*current_error*/ 1,
				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				/*asc*/ 0x1A,
				/*ascq*/ 0x00,
				/*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			xpt_done(ccb);
			return false;
		}

		if (param_list_len != csio->dxfer_len) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
			    __func__, param_list_len, csio->dxfer_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
				/*sense_format*/ SSD_TYPE_FIXED,
				/*current_error*/ 1,
				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				/*asc*/ 0x1A,
				/*ascq*/ 0x00,
				/*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			xpt_done(ccb);
			return false;
		}

		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);

		if (block_desc_len < 16) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s: Invalid descriptor length in param list: %d\n",
			    __func__, block_desc_len);
			mpi3mr_print_cdb(ccb);
			scsi_set_sense_data(&ccb->csio.sense_data,
				/*sense_format*/ SSD_TYPE_FIXED,
				/*current_error*/ 1,
				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				/*asc*/ 0x26,
				/*ascq*/ 0x00,
				/*extra args*/ SSD_ELEM_NONE);
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			ccb->ccb_h.status =
			    CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			xpt_done(ccb);
			return false;
		}

		if (param_list_len > (block_desc_len + 8)) {
			mpi3mr_print_cdb(ccb);
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
			    __func__, param_list_len, (block_desc_len + 8));
			param_list_len = block_desc_len + 8;
			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) & 0xff;
			scsiio_cdb_ptr(csio)[8] = param_list_len & 0xff;
			mpi3mr_print_cdb(ccb);
		}
		break;

	case SAS4116_CHIP_REV_B0:
		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
			trunc_param_len = param_list_len - ((param_list_len - 8) & 0xF);
			mpi3mr_print_cdb(ccb);
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "%s: Truncating param_list_len from (%d) to (%d)\n",
			    __func__, param_list_len, trunc_param_len);
			scsiio_cdb_ptr(csio)[7] = (trunc_param_len >> 8) & 0xff;
			scsiio_cdb_ptr(csio)[8] = trunc_param_len & 0xff;
			mpi3mr_print_cdb(ccb);
		}
		break;
	}

	return true;
}

/**
 * mpi3mr_tm_response_name - get TM response as a string
 * @resp_code: TM response code
 *
 * Convert a known task management response code into a readable
 * string.
 *
 * Return: response code string.
 */
static const char* mpi3mr_tm_response_name(U8 resp_code)
{
	char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}

	return desc;
}

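/**
 * mpi3mr_poll_pend_io_completions - drain all operational reply queues
 * @sc: Adapter instance reference
 *
 * Walks every reply queue and processes any completions that have already
 * been posted, without waiting for interrupts.
 */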
void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
{
	int i;
	int num_of_reply_queues = sc->num_queues;
	struct mpi3mr_irq_context *irq_ctx;

	for (i = 0; i < num_of_reply_queues; i++) {
		irq_ctx = &sc->irq_ctx[i];
		mpi3mr_complete_io_cmd(sc, irq_ctx);
	}
}

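/**
 * trigger_reset_from_watchdog - request a reset from watchdog context
 * @sc: Adapter instance reference
 * @reset_type: Reset type to request
 * @reset_reason: Reason code to record for the reset
 *
 * Records the requested reset in the soft state; the watchdog thread is
 * expected to notice the pending request and perform the actual reset.
 * If a reset is already in progress the request is dropped.
 */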
void
trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U16 reset_reason)
{
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
		return;
	}
	sc->reset.type = reset_type;
	sc->reset.reason = reset_reason;

	return;
}

/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @sc: Adapter instance reference
 * @cmd: Timed out command reference
 * @tm_type: Task Management type
 * @timeout: TM timeout value in seconds
 *
 * Issues a Task Management Request to the controller for the
 * target, LUN and command identified by @cmd, waits for its
 * completion, and checks the TM response. If the TM times out,
 * recovery is attempted by issuing a controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;

	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist for target ID: 0x%x, "
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	if (ccb) {
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "failed to post task management request\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), "
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated, "
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p), "
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}

/**
 * mpi3mr_task_abort - Abort error handling callback
 * @cmd: Timed out command reference
 *
 * Issue Abort Task Management if the command is in LLD scope
 * and verify whether it is aborted successfully, returning status
 * accordingly.
 *
 * Return: 0 (SUCCESS) on successful abort of the SCSI command,
 *         else non-zero (FAILED)
 */
static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	union ccb *ccb;

	sc = cmd->sc;

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	mpi3mr_dprint(sc, MPI3MR_INFO,
		      "attempting abort task for ccb(%p)\n", ccb);

	mpi3mr_print_cdb(ccb);

	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			      "%s: ccb is not in driver scope, abort task is not required\n",
			      sc->name);
		return retval;
	}
	cmd->state = MPI3MR_CMD_STATE_IN_TM;

	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);

	mpi3mr_dprint(sc, MPI3MR_INFO,
		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);

	return retval;
}

/**
 * mpi3mr_target_reset - Target reset error handling callback
 * @cmd: Timed out command reference
 *
 * Issue Target reset Task Management and verify the SCSI commands are
 * terminated successfully and return status accordingly.
 *
 * Return: 0 (SUCCESS) on successful termination of the SCSI commands,
 *         else non-zero (FAILED)
 */
static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *target;

	sc = cmd->sc;

	target = cmd->targ;
	if (target == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target: 0x%p, "
			      "target reset is not required\n", target);
		return retval;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
		      "attempting target reset on target(%d)\n", target->per_id);

	if (!mpi3mr_atomic_read(&target->outstanding)) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			      "no outstanding IOs on the target(%d),"
			      " target reset not required.\n", target->per_id);
		return retval;
	}

	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);

	mpi3mr_dprint(sc, MPI3MR_INFO,
		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
		      target->per_id);

	return retval;
}

/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @sc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
{
	U16 i, pend_ios = 0;

	for (i = 0; i < sc->num_queues; i++)
		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @sc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: 0 if all pending I/Os completed, -1 otherwise
 */
static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i;

	iocstate = mpi3mr_get_iocstate(sc);
	if (iocstate != MRIOC_STATE_READY) {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
		return -1;
	}

	if (!mpi3mr_get_fw_pending_ios(sc))
		return 0;

	mpi3mr_dprint(sc, MPI3MR_INFO,
		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(sc)) {
			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os completed while waiting! Reset not required\n", __func__);
			return 0;
		}
		iocstate = mpi3mr_get_iocstate(sc);
		if (iocstate != MRIOC_STATE_READY) {
			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state became NON-READY while waiting! Don't wait further, "
				      "proceed with Reset\n", __func__);
			return -1;
		}
		DELAY(1000 * 1000);
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :%d pending I/Os remain after the wait is exhausted! Proceed with Reset\n", __func__,
		      mpi3mr_get_fw_pending_ios(sc));

	return -1;
}

static void
mpi3mr_scsiio_timeout(void *data)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cmd;
	struct mpi3mr_target *targ_dev = NULL;

	if (!data)
		return;

	cmd = (struct mpi3mr_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return;
	}

	/*
	 * TMs are not supported for I/O timeouts on VD/LD, so directly issue a
	 * controller reset after waiting up to 180 seconds for outstanding
	 * I/Os to complete.
	 */
	targ_dev = cmd->targ;
	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
		return;
	}

	/* Issue task abort to recover the timed out IO */
	retval = mpi3mr_task_abort(cmd);
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort has failed to recover the timed out IO,
	 * try with the target reset
	 */
	retval = mpi3mr_target_reset(cmd);
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort and target reset have failed, so issue a controller
	 * reset (soft reset) through the OCR thread context
	 */
	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);

	return;
}

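/*
 * int_to_lun - encode a CAM LUN number into the 8-byte SCSI LUN field of an
 * MPI3 request.  Each 16-bit LUN level is stored big-endian, least
 * significant level first, matching flat LUN addressing.  For example
 * (illustrative only), lun = 5 encodes as
 * { 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }.
 */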
void int_to_lun(unsigned int lun, U8 *req_lun)
{
	int i;

	/* The SCSI LUN field is 8 bytes wide; clear all of it, not one byte */
	memset(req_lun, 0, 8);

	for (i = 0; i < sizeof(lun); i += 2) {
		req_lun[i] = (lun >> 8) & 0xFF;
		req_lun[i+1] = lun & 0xFF;
		lun = lun >> 16;
	}
}

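/*
 * get_req_queue_index - pick the operational request queue to post on.
 * Returns the index of the reply queue with the fewest I/Os currently
 * pending, spreading load across the controller's queue pairs.
 */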
static U16 get_req_queue_index(struct mpi3mr_softc *sc)
{
	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;

	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
	for (i = 0; i < sc->num_queues; i++) {
		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
			reply_q_index = i;
		}
	}

	return reply_q_index;
}

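/*
 * mpi3mr_action_scsiio - handle an XPT_SCSI_IO CCB: validate the target,
 * translate the CCB into an Mpi3SCSIIORequest_t (data direction, task
 * attributes, CDB, LUN), pick a request queue, and hand the command to
 * mpi3mr_map_request() for DMA mapping and submission.
 */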
static void
mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
{
	Mpi3SCSIIORequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *targ;
	struct mpi3mr_cmd *cm;
	uint8_t scsi_opcode, queue_idx;
	uint32_t mpi_control;

	sc = cam_sc->sc;
	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);

	if (sc->unrecoverable) {
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));

	scsi_opcode = scsiio_cdb_ptr(csio)[0];

	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
	      (scsi_opcode == START_STOP_UNIT))) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		return;
	}

	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
	if (targ == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ && targ->is_hidden)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_handle == 0x0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (mpi3mr_atomic_read(&targ->block_io) ||
		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((scsi_opcode == UNMAP) &&
		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
		return;

	cm = mpi3mr_get_command(sc);
	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpi3mr_release_command(cm);
		}
		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(cam_sc->sim, 1);
			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
		cm->data_dir = MPI3MR_READ;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
		cm->data_dir = MPI3MR_WRITE;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
		break;
	}

	if (csio->cdb_len > 16)
		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
	bzero(req, sizeof(*req));
	req->Function = MPI3_FUNCTION_SCSI_IO;
	req->HostTag = cm->hosttag;
	req->DataLength = htole32(csio->dxfer_len);
	req->DevHandle = htole16(targ->dev_handle);

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
		break;
	}

	if (targ->ws_len)
		mpi3mr_divert_ws(req, csio, targ->ws_len);

	req->Flags = htole32(mpi_control);

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0], csio->cdb_len);
	}

	cm->length = csio->dxfer_len;
	cm->targ = targ;
	int_to_lun(csio->ccb_h.target_lun, req->LUN);
	cm->ccb = ccb;
	csio->ccb_h.qos.sim_data = sbinuptime();
	queue_idx = get_req_queue_index(sc);
	cm->req_qidx = queue_idx;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);

	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_VADDR:
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		cm->length = csio->dxfer_len;
		if (cm->length)
			cm->data = csio->data_ptr;
		break;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Prepare SGEs and queue to hardware */
	mpi3mr_map_request(sc, cm);
}

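/*
 * mpi3mr_enqueue_request - post a prepared SCSI I/O to its request queue.
 * When I/O throttling (iot) is enabled, large I/Os (dxfer_len expressed in
 * 512-byte blocks) are counted against per-controller and per-throttle-group
 * limits, and once a high-water mark is crossed the request flags are set so
 * that the firmware diverts such I/Os.  On successful posting, the
 * per-command timeout callout is armed and the outstanding counters bumped.
 */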
static void
mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
{
	static int ratelimit;
	struct mpi3mr_op_req_queue *opreqq = &sc->op_req_q[cm->req_qidx];
	struct mpi3mr_throttle_group_info *tg = NULL;
	uint32_t data_len_blks = 0;
	uint32_t tracked_io_sz = 0;
	uint32_t ioc_pend_data_len = 0, tg_pend_data_len = 0;
	struct mpi3mr_target *targ = cm->targ;
	union ccb *ccb = cm->ccb;
	Mpi3SCSIIORequest_t *req = (Mpi3SCSIIORequest_t *)&cm->io_request;

	if (sc->iot_enable) {
		data_len_blks = ccb->csio.dxfer_len >> 9;

		if ((data_len_blks >= sc->io_throttle_data_length) &&
		    targ->io_throttle_enabled) {

			tracked_io_sz = data_len_blks;
			tg = targ->throttle_group;
			if (tg) {
				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);

				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);

				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d), "
						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
						targ->per_id, targ->dev_handle,
						data_len_blks, ioc_pend_data_len,
						tg_pend_data_len, sc->io_throttle_high,
						tg->high);
					ratelimit++;
				}

				if (!tg->io_divert && ((ioc_pend_data_len >=
				    sc->io_throttle_high) ||
				    (tg_pend_data_len >= tg->high))) {
					tg->io_divert = 1;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
						tg->id, targ->per_id);
					if (sc->mpi3mr_debug & MPI3MR_IOT)
						mpi3mr_print_cdb(ccb);
					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
					    tg, 1);
				}
			} else {
				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
					    targ->per_id, targ->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_high);
					ratelimit++;
				}

				if (ioc_pend_data_len >= sc->io_throttle_high) {
					targ->io_divert = 1;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Setting divert flag for persist_id(%d)\n",
						targ->per_id);
					if (sc->mpi3mr_debug & MPI3MR_IOT)
						mpi3mr_print_cdb(ccb);
				}
			}
		}

		if (targ->io_divert) {
			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
			req->Flags = htole32(le32toh(req->Flags) | MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING);
		}
	}

	if (mpi3mr_submit_io(sc, opreqq, (U8 *)&cm->io_request)) {
		if (tracked_io_sz) {
			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
			if (tg)
				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
	} else {
		callout_reset_sbt(&cm->callout, mstosbt(ccb->ccb_h.timeout), 0,
		    mpi3mr_scsiio_timeout, cm, 0);
		cm->callout_owner = true;
		mpi3mr_atomic_inc(&sc->fw_outstanding);
		mpi3mr_atomic_inc(&targ->outstanding);
		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
			sc->io_cmds_highwater++;
	}

	return;
}

static void
mpi3mr_cam_poll(struct cam_sim *sim)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_irq_context *irq_ctx;
	struct mpi3mr_softc *sc;
	int i;

	cam_sc = cam_sim_softc(sim);
	sc = cam_sc->sc;

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
		__func__, __LINE__);

	for (i = 0; i < sc->num_queues; i++) {
		irq_ctx = sc->irq_ctx + i;
		if (irq_ctx->op_reply_q->qid) {
			mpi3mr_complete_io_cmd(sc, irq_ctx);
		}
	}
}

static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}

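/*
 * Startup reference counting: during initial discovery each in-flight
 * discovery-related action takes a reference via mpi3mr_startup_increment()
 * (the first one also holds boot processing); when the last reference is
 * dropped in mpi3mr_startup_decrement() the SIM queue is released and boot
 * is allowed to continue with the discovered topology.
 */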
void
mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
{
	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
		if (cam_sc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
			    "%s freezing simq\n", __func__);
			xpt_hold_boot();
		}
		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
		    cam_sc->startup_refcount);
	}
}

void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
{
	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
		xpt_release_simq(cam_sc->sim, 1);
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
	}
}

void
mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
{
	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(cam_sc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ->per_id;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
	xpt_rescan(ccb);
}

void
mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
{
	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
		if (--cam_sc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
			    "%s releasing simq\n", __func__);
			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
			xpt_release_simq(cam_sc->sim, 1);
			xpt_release_boot();
		}
		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
		    cam_sc->startup_refcount);
	}
}

static void
mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
{
	if (!fw_event)
		return;

	if (fw_event->event_data != NULL) {
		free(fw_event->event_data, M_MPI3MR);
		fw_event->event_data = NULL;
	}

	free(fw_event, M_MPI3MR);
	fw_event = NULL;
}

static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc)
{
	struct mpi3mr_fw_event_work *fw_event = NULL;
	mtx_lock(&sc->mpi3mr_mtx);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mpi3mr_fw_event_free(sc, fw_event);
	}
	mtx_unlock(&sc->mpi3mr_mtx);
}

static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
	Mpi3EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	U16 handle;
	U8 reason_code, phy_number;
	char *status_str = NULL;
	U8 link_rate, prev_link_rate;

	switch (event_data->ExpStatus) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str =  "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	mpi3mr_dprint(sc, MPI3MR_INFO,
		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
	    "start_phy(%02d), num_entries(%d)\n", __func__,
	    le16toh(event_data->ExpanderDevHandle),
	    le16toh(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PhyEntry[i].PhyStatus &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
		    phy_number, handle, status_str, link_rate, prev_link_rate);
	}
}

static void
mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
{
	Mpi3EventDataSasTopologyChangeList_t *event_data =
		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
	int i;
	U16 handle;
	U8 reason_code, link_rate;
	struct mpi3mr_target *target = NULL;

	mpi3mr_sastopochg_evt_debug(sc, event_data);

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
		link_rate = event_data->PhyEntry[i].LinkRate >> 4;

		if (!handle)
			continue;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

		if (!target)
			continue;

		target->link_rate = link_rate;
		reason_code = event_data->PhyEntry[i].PhyStatus &
			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (target->exposed_to_os)
				mpi3mr_remove_device_from_os(sc, target->dev_handle);
			mpi3mr_remove_device_from_list(sc, target, false);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			break;
		default:
			break;
		}
	}

	/*
	 * refcount was incremented for this event in
	 * mpi3mr_evt_handler. Decrement it here because the event has
	 * been processed.
	 */
	mpi3mr_startup_decrement(sc->cam_sc);
	return;
}

static inline void
mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
		      struct mpi3mr_fw_event_work *fwevt)
{
	mpi3mr_app_save_logdata(sc, fwevt->event_data,
				fwevt->event_data_size);
}

static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
	Mpi3EventDataPcieTopologyChangeList_t *event_data)
{
	int i;
	U16 handle;
	U16 reason_code;
	U8 port_number;
	char *status_str = NULL;
	U8 link_rate, prev_link_rate;

	switch (event_data->SwitchStatus) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str =  "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
		__func__, status_str);
	mpi3mr_dprint(sc, MPI3MR_INFO,
		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) "
		"start_port(%02d), num_entries(%d)\n", __func__,
		le16toh(event_data->SwitchDevHandle),
		le16toh(event_data->EnclosureHandle),
		event_data->StartPortNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16toh(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		port_number = event_data->StartPortNum + i;
		reason_code = event_data->PortEntry[i].PortStatus;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link rate change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PortEntry[i].CurrentPortInfo &
			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
		    port_number, handle, status_str, link_rate, prev_link_rate);
	}
}
1753
static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
    struct mpi3mr_fw_event_work *fwevt)
{
	Mpi3EventDataPcieTopologyChangeList_t *event_data =
		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
	int i;
	U16 handle;
	U8 reason_code, link_rate;
	struct mpi3mr_target *target = NULL;

	mpi3mr_pcietopochg_evt_debug(sc, event_data);

	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16toh(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		if (!target)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo &
			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		target->link_rate = link_rate;

		reason_code = event_data->PortEntry[i].PortStatus;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (target->exposed_to_os)
				mpi3mr_remove_device_from_os(sc, target->dev_handle);
			mpi3mr_remove_device_from_list(sc, target, false);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			break;
		default:
			break;
		}
	}

	/*
	 * refcount was incremented for this event in
	 * mpi3mr_evt_handler. Decrement it here because the event has
	 * been processed.
	 */
	mpi3mr_startup_decrement(sc->cam_sc);
	return;
}

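/**
 * mpi3mr_add_device - Expose a device to the OS
 * @sc: Adapter instance reference
 * @per_id: Persistent ID of the device
 *
 * Looks the device up in the driver's internal target list and, if it
 * is present, not hidden and no reset is in progress, triggers a CAM
 * rescan so the OS discovers it, then marks it as exposed.
 *
 * Return: Nothing.
 */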
void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
{
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
		"Adding device(persistent id: 0x%x)\n", per_id);

	mpi3mr_startup_increment(sc->cam_sc);
	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);

	if (!target) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's "
		    "internal target list, persistent_id: %d\n",
		    per_id);
		goto out;
	}

	if (target->is_hidden) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
			per_id);
		goto out;
	}

	if (!target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
		target->exposed_to_os = 1;
	}

out:
	mpi3mr_startup_decrement(sc->cam_sc);
}

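/**
 * mpi3mr_remove_device_from_os - Remove a device from the OS
 * @sc: Adapter instance reference
 * @handle: Device handle of the device to remove
 *
 * Flags the target as being removed, polls the reply queues once if
 * I/Os are still outstanding on it, and triggers a CAM rescan so the
 * OS drops the device.
 *
 * Return: 0 on success, -1 if the device is not in the driver's list.
 */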
int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
{
	int retval = 0;
	struct mpi3mr_target *target;
	unsigned int target_outstanding;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
		"Removing Device (dev_handle: %d)\n", handle);

	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

	if (!target) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Device (dev_handle: %d) is already removed from driver's list\n",
			handle);
		mpi3mr_rescan_target(sc, NULL);
		retval = -1;
		goto out;
	}

	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;

	target_outstanding = mpi3mr_atomic_read(&target->outstanding);
	if (target_outstanding) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "there are [%2d] outstanding IOs on target: %d, "
			      "poll reply queue once\n", target_outstanding, target->per_id);
		mpi3mr_poll_pend_io_completions(sc);
		target_outstanding = mpi3mr_atomic_read(&target->outstanding);
		if (target_outstanding)
			mpi3mr_dprint(sc, MPI3MR_ERROR, "[%2d] outstanding IOs present on target: %d "
				      "despite poll\n", target_outstanding, target->per_id);
	}

	if (target->exposed_to_os && !sc->reset_in_progress) {
		mpi3mr_rescan_target(sc, target);
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
		target->exposed_to_os = 0;
	}

	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
out:
	return retval;
}

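/**
 * mpi3mr_remove_device_from_list - Remove a device from driver's list
 * @sc: Adapter instance reference
 * @target: Target to remove
 * @must_delete: Remove even if the device removal handshake with the
 *		 firmware has not completed
 *
 * Unlinks the target from the driver's internal target list under the
 * target lock and frees it.
 *
 * Return: Nothing.
 */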
void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
	struct mpi3mr_target *target, bool must_delete)
{
	if ((must_delete == false) &&
	    (target->state != MPI3MR_DEV_REMOVE_HS_COMPLETED))
		return;

	mtx_lock_spin(&sc->target_lock);
	TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
	mtx_unlock_spin(&sc->target_lock);

	free(target, M_MPI3MR);
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @sc: Adapter instance reference
 * @fwevt: Firmware event
 *
 * Process Device Status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
	struct mpi3mr_fw_event_work *fwevt)
{
	U16 dev_handle = 0;
	U8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_target *tgtdev = NULL;
	Mpi3EventDataDeviceStatusChange_t *evtdata =
	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;

	dev_handle = le16toh(evtdata->DevHandle);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->ReasonCode);
	switch (evtdata->ReasonCode) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->ReasonCode);
		break;
	}

	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
	if (!tgtdev)
		return;

	if (uhide) {
		if (!tgtdev->exposed_to_os)
			mpi3mr_add_device(sc, tgtdev->per_id);
	}

	if (delete)
		mpi3mr_remove_device_from_os(sc, dev_handle);

	if (cleanup)
		mpi3mr_remove_device_from_list(sc, tgtdev, false);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @sc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
	Mpi3DevicePage0_t *dev_pg0)
{
	struct mpi3mr_target *tgtdev = NULL;
	U16 dev_handle = 0, perst_id = 0;

	perst_id = le16toh(dev_pg0->PersistentID);
	dev_handle = le16toh(dev_pg0->DevHandle);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
	if (!tgtdev)
		return;

	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
		mpi3mr_add_device(sc, perst_id);

	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
}

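/**
 * mpi3mr_fw_work - Firmware event dispatcher
 * @sc: Adapter instance reference
 * @fw_event: Dequeued firmware event
 *
 * Dispatches a firmware event to its bottomhalf handler, issues a
 * process event ACK to the firmware when one was requested, and frees
 * the event afterwards. Events are not processed while the controller
 * is shutting down.
 *
 * Return: Nothing.
 */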
static void
mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
{
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	if (!fw_event->process_event)
		goto evt_ack;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on Event: [%x]\n",
	    event_count++, __func__, fw_event->event);

	switch (fw_event->event) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
			(Mpi3DevicePage0_t *) fw_event->event_data;
		mpi3mr_add_device(sc, le16toh(dev_pg0->PersistentID));
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		mpi3mr_devinfochg_evt_bh(sc,
		    (Mpi3DevicePage0_t *) fw_event->event_data);
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_process_sastopochg_evt(sc, fw_event);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_process_pcietopochg_evt(sc, fw_event);
		break;
	}
	case MPI3_EVENT_LOG_DATA:
	{
		mpi3mr_logdata_evt_bh(sc, fw_event);
		break;
	}
	default:
		mpi3mr_dprint(sc, MPI3MR_TRACE, "Unhandled event 0x%0X\n",
		    fw_event->event);
		break;
	}

evt_ack:
	if (fw_event->send_ack) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Process event ACK for event 0x%0X\n",
		    fw_event->event);
		mpi3mr_process_event_ack(sc, fw_event->event,
		    fw_event->event_context);
	}

out:
	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
	    __func__, fw_event->event);

	mpi3mr_fw_event_free(sc, fw_event);
}

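/**
 * mpi3mr_firmware_event_work - Firmware event worker
 * @arg: Adapter instance (struct mpi3mr_softc) reference
 * @pending: Taskqueue pending count, unused
 *
 * Taskqueue worker which drains the firmware event queue. The event
 * lock is dropped around each call to mpi3mr_fw_work() so that event
 * processing does not block new events from being queued.
 *
 * Return: Nothing.
 */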
void
mpi3mr_firmware_event_work(void *arg, int pending)
{
	struct mpi3mr_fw_event_work *fw_event;
	struct mpi3mr_softc *sc;

	sc = (struct mpi3mr_softc *)arg;

	mtx_lock(&sc->fwevt_lock);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mtx_unlock(&sc->fwevt_lock);
		mpi3mr_fw_work(sc, fw_event);
		mtx_lock(&sc->fwevt_lock);
	}
	mtx_unlock(&sc->fwevt_lock);
}

/*
 * mpi3mr_cam_attach - CAM layer registration
 * @sc: Adapter reference
 *
 * This function does simq allocation, cam registration, xpt_bus registration,
 * event taskqueue initialization and async event handler registration.
 *
 * Return: 0 on success, appropriate error code on failure
 */
int
mpi3mr_cam_attach(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cam_softc *cam_sc;
	cam_status status;
	int unit, error = 0, reqs;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");

	/* An M_WAITOK allocation cannot fail, so no NULL check is needed. */
	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR,
	    M_WAITOK | M_ZERO);

	cam_sc->maxtargets = sc->facts.max_perids + 1;

	TAILQ_INIT(&cam_sc->tgt_list);

	sc->cam_sc = cam_sc;
	cam_sc->sc = sc;

	reqs = sc->max_host_ios;

	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpi3mr_dev);
	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
	if (cam_sc->sim == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&cam_sc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
	/* taskqueue_create() with M_NOWAIT may fail, check before use */
	if (cam_sc->ev_tq == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create event taskqueue\n");
		error = ENOMEM;
		goto out;
	}
	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpi3mr_dev));

	mtx_lock(&sc->mpi3mr_mtx);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Error 0x%x registering SCSI bus\n", error);
		mtx_unlock(&sc->mpi3mr_mtx);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
	cam_sc->startup_refcount = 0;
	mpi3mr_startup_increment(cam_sc);

	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
	    cam_sim_path(cam_sc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "Error 0x%x creating sim path\n", status);
		cam_sc->path = NULL;
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
	}

	mtx_unlock(&sc->mpi3mr_mtx);

	error = mpi3mr_register_events(sc);

out:
	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x\n", __func__, error);
	return (error);
}

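/*
 * mpi3mr_cam_detach - CAM layer deregistration
 * @sc: Adapter reference
 *
 * This function tears down everything mpi3mr_cam_attach set up: it
 * drains and frees the event taskqueue, releases the SIMQ, deregisters
 * the bus, frees the SIM, path and devq, and frees every target left
 * on the driver's internal target list.
 *
 * Return: 0 always
 */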
int
mpi3mr_cam_detach(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *target;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
	if (sc->cam_sc == NULL)
		return (0);

	cam_sc = sc->cam_sc;

	mpi3mr_freeup_events(sc);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * not held so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (cam_sc->ev_tq != NULL)
		taskqueue_free(cam_sc->ev_tq);

	mtx_lock(&sc->mpi3mr_mtx);

	while (cam_sc->startup_refcount != 0)
		mpi3mr_startup_decrement(cam_sc);

	/* Deregister our async handler */
	if (cam_sc->path != NULL) {
		xpt_free_path(cam_sc->path);
		cam_sc->path = NULL;
	}

	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
		xpt_release_simq(cam_sc->sim, 1);

	if (cam_sc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
		cam_sim_free(cam_sc->sim, FALSE);
	}

	mtx_unlock(&sc->mpi3mr_mtx);

	if (cam_sc->devq != NULL)
		cam_simq_free(cam_sc->devq);

	/*
	 * Drain the internal target list, dropping the spin lock around
	 * each free() since free(9) must not be called with a spin mutex
	 * held.
	 */
	mtx_lock_spin(&sc->target_lock);
	while ((target = TAILQ_FIRST(&cam_sc->tgt_list)) != NULL) {
		TAILQ_REMOVE(&cam_sc->tgt_list, target, tgt_next);
		mtx_unlock_spin(&sc->target_lock);
		free(target, M_MPI3MR);
		mtx_lock_spin(&sc->target_lock);
	}
	mtx_unlock_spin(&sc->target_lock);

	free(cam_sc, M_MPI3MR);
	sc->cam_sc = NULL;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
	return (0);
}
