1/*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mrsas/mrsas_cam.c 299670 2016-05-13 12:18:12Z kadesai $");
35
36#include "dev/mrsas/mrsas.h"
37
38#include <cam/cam.h>
39#include <cam/cam_ccb.h>
40#include <cam/cam_sim.h>
41#include <cam/cam_xpt_sim.h>
42#include <cam/cam_debug.h>
43#include <cam/cam_periph.h>
44#include <cam/cam_xpt_periph.h>
45
46#include <cam/scsi/scsi_all.h>
47#include <cam/scsi/scsi_message.h>
48#include <sys/taskqueue.h>
49#include <sys/kernel.h>
50
51
52#include <sys/time.h> /* XXX for pcpu.h */
53#include <sys/pcpu.h> /* XXX for PCPU_GET */
54
55#define smp_processor_id() PCPU_GET(cpuid)
56
57/*
58 * Function prototypes
59 */
60int mrsas_cam_attach(struct mrsas_softc *sc);
61int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
62int mrsas_bus_scan(struct mrsas_softc *sc);
63int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
64int
65mrsas_map_request(struct mrsas_softc *sc,
66 struct mrsas_mpt_cmd *cmd, union ccb *ccb);
67int
68mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
69 union ccb *ccb);
70int
71mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
72 union ccb *ccb);
73int
74mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
75 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
76int
77mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
78 union ccb *ccb, u_int32_t device_id,
79 MRSAS_RAID_SCSI_IO_REQUEST * io_request);
80void mrsas_xpt_freeze(struct mrsas_softc *sc);
81void mrsas_xpt_release(struct mrsas_softc *sc);
82void mrsas_cam_detach(struct mrsas_softc *sc);
83void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
84void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
85void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
86void
87mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
88 u_int32_t req_desc_hi);
89void
90mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
91 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
92 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
93 u_int32_t ld_block_size);
94static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
95static void mrsas_cam_poll(struct cam_sim *sim);
96static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
97static void mrsas_scsiio_timeout(void *data);
98static void
99mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
100 int nseg, int error);
101static int32_t
102mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
103 union ccb *ccb);
104struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
105MRSAS_REQUEST_DESCRIPTOR_UNION *
106 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
107
108extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
109extern u_int32_t
110MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map,
111 struct mrsas_softc *sc);
112extern void mrsas_isr(void *arg);
113extern void mrsas_aen_handler(struct mrsas_softc *sc);
114extern u_int8_t
115MR_BuildRaidContext(struct mrsas_softc *sc,
116 struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
117 MR_DRV_RAID_MAP_ALL * map);
118extern u_int16_t
119MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
120 MR_DRV_RAID_MAP_ALL * map);
121extern u_int16_t
122mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
123 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
124extern u_int8_t
125megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
126 u_int64_t block, u_int32_t count);
127
128
129/*
130 * mrsas_cam_attach: Main entry to CAM subsystem
131 * input: Adapter instance soft state
132 *
133 * This function is called from mrsas_attach() during initialization to perform
134 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
135 * earlier, it would also initiate a bus scan.
136 */
int
mrsas_cam_attach(struct mrsas_softc *sc)
{
	struct cam_devq *devq;
	int mrsas_cam_depth;

	/*
	 * SIM queue depth: all firmware command slots minus those the
	 * driver reserves for its own internal commands.
	 */
	mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;

	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
		return (ENOMEM);
	}
	/*
	 * Create SIM for bus 0 (logical drives — see mrsas_action) and
	 * register it, also create the wildcard path for that bus.
	 */
	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_0 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
	/*
	 * NOTE(review): taskqueue_create() is called with M_NOWAIT and its
	 * result is not checked; a NULL sc->ev_tq would be passed to
	 * taskqueue_start_threads() below — confirm against upstream.
	 */
	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mrsas_dev));
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
						 * devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/*
	 * Create SIM for bus 1 (system physical drives) and register it,
	 * also create the wildcard path. The devq allocated above is
	 * shared between both SIMs.
	 */
	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_1 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/* Kernels <= 7.4 do not rescan automatically; do it explicitly. */
#if (__FreeBSD_version <= 704000)
	if (mrsas_bus_scan(sc)) {
		device_printf(sc->mrsas_dev, "Error in bus scan.\n");
		return (1);
	}
#endif
	return (0);
}
219
220/*
221 * mrsas_cam_detach: De-allocates and teardown CAM
222 * input: Adapter instance soft state
223 *
224 * De-registers and frees the paths and SIMs.
225 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		/* FALSE: keep the devq — it is shared with sim_1 below. */
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		/* TRUE: last user of the shared devq, free it with the SIM. */
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}
246
247/*
248 * mrsas_action: SIM callback entry point
249 * input: pointer to SIM pointer to CAM Control Block
250 *
251 * This function processes CAM subsystem requests. The type of request is stored
252 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
253 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
254 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				/* Target is not an exposed system PD. */
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				/* Nonzero return means the IO was not queued. */
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			/* Per-command abort is not supported. */
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			/* Complete immediately; no bus-reset action taken. */
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			/* Report fixed SPI/SCSI-2 settings with tagged queuing. */
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			/* Transfer settings are fixed; changing them is refused. */
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
#if (__FreeBSD_version >= 902001)
			ccb->cpi.hba_misc = PIM_UNMAPPED;
#else
			ccb->cpi.hba_misc = 0;
#endif
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
			strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
			/*
			 * NOTE(review): XPT_SCSI_IO above says "bus 0 is LD,
			 * bus 1 is system-PD", yet bus 0 reports MRSAS_MAX_PD
			 * targets and bus 1 reports MRSAS_MAX_LD_IDS — this
			 * mapping looks inverted; confirm against upstream.
			 */
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
			ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}
361
362/*
363 * mrsas_scsiio_timeout: Callback function for IO timed out
364 * input: mpt command context
365 *
366 * This function will execute after timeout value provided by ccb header from
367 * CAM layer, if timer expires. Driver will run timer for all DCDM and LDIO
368 * coming from CAM layer. This function is callback function for IO timeout
369 * and it runs in no-sleep context. Set do_timedout_reset in Adapter context
370 * so that it will execute OCR/Kill adpter from ocr_thread context.
371 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		/* No associated CCB; nothing to escalate. */
		printf("command timeout with NULL ccb\n");
		return;
	}
	/*
	 * Below callout is dummy entry so that it will be cancelled from
	 * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
	 * on OCR enable/disable property of Controller from ocr_thread
	 * context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif
	/* Flag the timeout and wake the OCR thread to perform the reset. */
	sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR;
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
}
402
403/*
404 * mrsas_startio: SCSI IO entry point
405 * input: Adapter instance soft state
406 * pointer to CAM Control Block
407 *
408 * This function is the SCSI IO entry point and it initiates IO processing. It
409 * copies the IO and depending if the IO is read/write or inquiry, it would
410 * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns 0
411 * if the command is sent to firmware successfully, otherwise it returns 1.
412 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	/* SYNCHRONIZE CACHE is completed immediately, never sent to FW. */
	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;
	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		/* Command pool exhausted — ask CAM to requeue the CCB. */
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return (0);
	}
	/* Record the DMA direction(s) requested by the CCB. */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

/* For FreeBSD 9.2 and higher */
#if (__FreeBSD_version >= 902001)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		/*
		 * NOTE(review): unlike the PADDR path above, this path does
		 * not clear CAM_SIM_QUEUED before completing — confirm
		 * whether that asymmetry is intentional.
		 */
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		/* Single virtual buffer; bounded by max SGE capacity. */
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
				mrsas_release_mpt_cmd(cmd);
				ccb_h->status = CAM_REQ_TOO_BIG;
				goto done;
			}
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			/* S/G lists are not supported on this path either. */
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	/* SMIDs are 1-based; the descriptor array is 0-based. */
	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		/*
		 * NOTE(review): this path returns without releasing 'cmd'
		 * or clearing CAM_SIM_QUEUED — possible command leak;
		 * confirm against upstream.
		 */
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	/* Copy the CDB into the firmware IO frame. */
	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		/* Secure-JBOD non-RW commands cannot use the fast path. */
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start timer for IO timeout. The callout is armed for 600 seconds;
	 * on expiry mrsas_scsiio_timeout() triggers OCR handling.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif
	mrsas_atomic_inc(&sc->fw_outstanding);

	/* Track the high-water mark of outstanding firmware commands. */
	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/* Hand the request descriptor to the firmware. */
	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	return (0);

done:
	xpt_done(ccb);
	return (0);
}
605
606/*
607 * mrsas_find_io_type: Determines if IO is read/write or inquiry
608 * input: pointer to CAM Control Block
609 *
 * This function classifies the IO by CDB opcode and SIM bus. It returns
 * READ_WRITE_LDIO or NON_READ_WRITE_LDIO for bus 0 (logical drives), and
 * READ_WRITE_SYSPDIO or NON_READ_WRITE_SYSPDIO for bus 1 (system PDs).
612 */
613int
614mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
615{
616 struct ccb_scsiio *csio = &(ccb->csio);
617
618 switch (csio->cdb_io.cdb_bytes[0]) {
619 case READ_10:
620 case WRITE_10:
621 case READ_12:
622 case WRITE_12:
623 case READ_6:
624 case WRITE_6:
625 case READ_16:
626 case WRITE_16:
627 return (cam_sim_bus(sim) ?
628 READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
629 default:
630 return (cam_sim_bus(sim) ?
631 NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
632 }
633}
634
635/*
636 * mrsas_get_mpt_cmd: Get a cmd from free command pool
637 * input: Adapter instance soft state
638 *
639 * This function removes an MPT command from the command free list and
640 * initializes it.
641 */
642struct mrsas_mpt_cmd *
643mrsas_get_mpt_cmd(struct mrsas_softc *sc)
644{
645 struct mrsas_mpt_cmd *cmd = NULL;
646
647 mtx_lock(&sc->mpt_cmd_pool_lock);
648 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
649 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
650 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
651 }
652 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
653 cmd->data = NULL;
654 cmd->length = 0;
655 cmd->flags = 0;
656 cmd->error_code = 0;
657 cmd->load_balance = 0;
658 cmd->ccb_ptr = NULL;
659 mtx_unlock(&sc->mpt_cmd_pool_lock);
660
661 return cmd;
662}
663
664/*
665 * mrsas_release_mpt_cmd: Return a cmd to free command pool
666 * input: Command packet for return to free command pool
667 *
668 * This function returns an MPT command to the free command list.
669 */
670void
671mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
672{
673 struct mrsas_softc *sc = cmd->sc;
674
675 mtx_lock(&sc->mpt_cmd_pool_lock);
676 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
677 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
678 mtx_unlock(&sc->mpt_cmd_pool_lock);
679
680 return;
681}
682
683/*
684 * mrsas_get_request_desc: Get request descriptor from array
685 * input: Adapter instance soft state
686 * SMID index
687 *
688 * This function returns a pointer to the request descriptor.
689 */
690MRSAS_REQUEST_DESCRIPTOR_UNION *
691mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
692{
693 u_int8_t *p;
694
695 if (index >= sc->max_fw_cmds) {
696 device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
697 return NULL;
698 }
699 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
700
701 return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
702}
703
704/*
705 * mrsas_build_ldio_rw: Builds an LDIO command
706 * input: Adapter instance soft state
707 * Pointer to command packet
708 * Pointer to CCB
709 *
710 * This function builds the LDIO command packet. It returns 0 if the command is
711 * built successfully, otherwise it returns a 1.
712 */
713int
714mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
715 union ccb *ccb)
716{
717 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
718 struct ccb_scsiio *csio = &(ccb->csio);
719 u_int32_t device_id;
720 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
721
722 device_id = ccb_h->target_id;
723
724 io_request = cmd->io_request;
725 io_request->RaidContext.VirtualDiskTgtId = device_id;
726 io_request->RaidContext.status = 0;
727 io_request->RaidContext.exStatus = 0;
728
729 /* just the cdb len, other flags zero, and ORed-in later for FP */
730 io_request->IoFlags = csio->cdb_len;
731
732 if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
733 device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
734
735 io_request->DataLength = cmd->length;
736
737 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
738 if (cmd->sge_count > sc->max_num_sge) {
739 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
740 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
741 return (FAIL);
742 }
743 /*
744 * numSGE store lower 8 bit of sge_count. numSGEExt store
745 * higher 8 bit of sge_count
746 */
747 io_request->RaidContext.numSGE = cmd->sge_count;
748 io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
749
750 } else {
751 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
752 return (FAIL);
753 }
754 return (0);
755}
756
757/*
758 * mrsas_setup_io: Set up data including Fast Path I/O
759 * input: Adapter instance soft state
760 * Pointer to command packet
761 * Pointer to CCB
762 *
763 * This function builds the DCDB inquiry command. It returns 0 if the command
764 * is built successfully, otherwise it returns a 1.
765 */
766int
767mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
768 union ccb *ccb, u_int32_t device_id,
769 MRSAS_RAID_SCSI_IO_REQUEST * io_request)
770{
771 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
772 struct ccb_scsiio *csio = &(ccb->csio);
773 struct IO_REQUEST_INFO io_info;
774 MR_DRV_RAID_MAP_ALL *map_ptr;
775 u_int8_t fp_possible;
776 u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
777 u_int32_t datalength = 0;
778
779 start_lba_lo = 0;
780 start_lba_hi = 0;
781 fp_possible = 0;
782
783 /*
784 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
785 */
786 if (csio->cdb_len == 6) {
787 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
788 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
789 ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
790 (u_int32_t)csio->cdb_io.cdb_bytes[3];
791 start_lba_lo &= 0x1FFFFF;
792 }
793 /*
794 * READ_10 (0x28) or WRITE_6 (0x2A) cdb
795 */
796 else if (csio->cdb_len == 10) {
797 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
798 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
799 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
800 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
801 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
802 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
803 }
804 /*
805 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
806 */
807 else if (csio->cdb_len == 12) {
808 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
809 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
810 ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
811 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
812 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
813 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
814 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
815 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
816 }
817 /*
818 * READ_16 (0x88) or WRITE_16 (0xx8A) cdb
819 */
820 else if (csio->cdb_len == 16) {
821 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
822 ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
823 ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
824 ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
825 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
826 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
827 (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
828 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
829 start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
830 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
831 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
832 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
833 }
834 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
835 io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
836 io_info.numBlocks = datalength;
837 io_info.ldTgtId = device_id;
838
839 switch (ccb_h->flags & CAM_DIR_MASK) {
840 case CAM_DIR_IN:
841 io_info.isRead = 1;
842 break;
843 case CAM_DIR_OUT:
844 io_info.isRead = 0;
845 break;
846 case CAM_DIR_NONE:
847 default:
848 mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
849 break;
850 }
851
852 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
853 ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
854
855 if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) ||
856 (!sc->fast_path_io)) {
857 io_request->RaidContext.regLockFlags = 0;
858 fp_possible = 0;
859 } else {
860 if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
861 fp_possible = io_info.fpOkForIo;
862 }
863
864 cmd->request_desc->SCSIIO.MSIxIndex =
865 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
866
867
868 if (fp_possible) {
869 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
870 start_lba_lo, ld_block_size);
871 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
872 cmd->request_desc->SCSIIO.RequestFlags =
873 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
874 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
875 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
875 if ((sc->device_id == MRSAS_INVADER) ||
876 (sc->device_id == MRSAS_FURY) ||
877 (sc->device_id == MRSAS_INTRUDER) ||
878 (sc->device_id == MRSAS_INTRUDER_24)) {
879 if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
880 cmd->request_desc->SCSIIO.RequestFlags =
881 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
882 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
883 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
884 io_request->RaidContext.nseg = 0x1;
885 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
886 io_request->RaidContext.regLockFlags |=
887 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
888 MR_RL_FLAGS_SEQ_NUM_ENABLE);
889 }
890 if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
891 (io_info.isRead)) {
892 io_info.devHandle =
893 mrsas_get_updated_dev_handle(sc,
894 &sc->load_balance_info[device_id], &io_info);
895 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
896 cmd->pd_r1_lb = io_info.pd_after_lb;
897 } else
898 cmd->load_balance = 0;
899 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
900 io_request->DevHandle = io_info.devHandle;
901 } else {
902 /* Not FP IO */
903 io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
904 cmd->request_desc->SCSIIO.RequestFlags =
905 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
906 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
904 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
907 if ((sc->device_id == MRSAS_INVADER) ||
908 (sc->device_id == MRSAS_FURY) ||
909 (sc->device_id == MRSAS_INTRUDER) ||
910 (sc->device_id == MRSAS_INTRUDER_24)) {
911 if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
912 cmd->request_desc->SCSIIO.RequestFlags =
913 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
914 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
915 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
916 io_request->RaidContext.regLockFlags |=
917 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
918 MR_RL_FLAGS_SEQ_NUM_ENABLE);
919 io_request->RaidContext.nseg = 0x1;
920 }
921 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
922 io_request->DevHandle = device_id;
923 }
924 return (0);
925}
926
927/*
928 * mrsas_build_ldio_nonrw: Builds an LDIO command
929 * input: Adapter instance soft state
930 * Pointer to command packet
931 * Pointer to CCB
932 *
933 * This function builds the LDIO command packet. It returns 0 if the command is
934 * built successfully, otherwise it returns a 1.
935 */
936int
937mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
938 union ccb *ccb)
939{
940 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
941 u_int32_t device_id;
942 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
943
944 io_request = cmd->io_request;
945 device_id = ccb_h->target_id;
946
947 /* FW path for LD Non-RW (SCSI management commands) */
948 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
949 io_request->DevHandle = device_id;
950 cmd->request_desc->SCSIIO.RequestFlags =
951 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
952 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
953
954 io_request->RaidContext.VirtualDiskTgtId = device_id;
955 io_request->LUN[1] = ccb_h->target_lun & 0xF;
956 io_request->DataLength = cmd->length;
957
958 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
959 if (cmd->sge_count > sc->max_num_sge) {
960 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
961 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
962 return (1);
963 }
964 /*
965 * numSGE store lower 8 bit of sge_count. numSGEExt store
966 * higher 8 bit of sge_count
967 */
968 io_request->RaidContext.numSGE = cmd->sge_count;
969 io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
970 } else {
971 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
972 return (1);
973 }
974 return (0);
975}
976
977/*
978 * mrsas_build_syspdio: Builds an DCDB command
979 * input: Adapter instance soft state
980 * Pointer to command packet
981 * Pointer to CCB
982 *
983 * This function builds the DCDB inquiry command. It returns 0 if the command
984 * is built successfully, otherwise it returns a 1.
985 */
986int
987mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
988 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
989{
990 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
991 u_int32_t device_id;
992 MR_DRV_RAID_MAP_ALL *local_map_ptr;
993 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
994 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
995
996 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
997
998 io_request = cmd->io_request;
999 device_id = ccb_h->target_id;
1000 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1001 io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1002 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1003 io_request->RaidContext.regLockFlags = 0;
1004 io_request->RaidContext.regLockRowLBA = 0;
1005 io_request->RaidContext.regLockLength = 0;
1006
1007 /* If FW supports PD sequence number */
1008 if (sc->use_seqnum_jbod_fp &&
1009 sc->pd_list[device_id].driveType == 0x00) {
1010 //printf("Using Drv seq num\n");
1011 io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
1012 io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
1013 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1014 io_request->RaidContext.regLockFlags |=
1015 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1016 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1017 io_request->RaidContext.nseg = 0x1;
1018 } else if (sc->fast_path_io) {
1019 //printf("Using LD RAID map\n");
1020 io_request->RaidContext.VirtualDiskTgtId = device_id;
1021 io_request->RaidContext.configSeqNum = 0;
1022 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1023 io_request->DevHandle =
1024 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1025 } else {
1026 //printf("Using FW PATH\n");
1027 /* Want to send all IO via FW path */
1028 io_request->RaidContext.VirtualDiskTgtId = device_id;
1029 io_request->RaidContext.configSeqNum = 0;
1030 io_request->DevHandle = 0xFFFF;
1031 }
1032
1033 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1034 cmd->request_desc->SCSIIO.MSIxIndex =
1035 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1036
1037 if (!fp_possible) {
1038 /* system pd firmware path */
1039 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1040 cmd->request_desc->SCSIIO.RequestFlags =
1041 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1042 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1043 io_request->RaidContext.timeoutValue =
1044 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1045 io_request->RaidContext.VirtualDiskTgtId = device_id;
1046 } else {
1047 /* system pd fast path */
1048 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1049 io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
1050
1051 /*
1052 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
1053 * Because the NON RW cmds will now go via FW Queue
1054 * and not the Exception queue
1055 */
1056 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1057
1058 cmd->request_desc->SCSIIO.RequestFlags =
1059 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1060 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1061 }
1062
1063 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1064 io_request->DataLength = cmd->length;
1065
1066 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1067 if (cmd->sge_count > sc->max_num_sge) {
1068 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
1069 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
1070 return (1);
1071 }
1072 /*
1073 * numSGE store lower 8 bit of sge_count. numSGEExt store
1074 * higher 8 bit of sge_count
1075 */
1076 io_request->RaidContext.numSGE = cmd->sge_count;
1077 io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1078 } else {
1079 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1080 return (1);
1081 }
1082 return (0);
1083}
1084
1085/*
1086 * mrsas_map_request: Map and load data
1087 * input: Adapter instance soft state
1088 * Pointer to command packet
1089 *
1090 * For data from OS, map and load the data buffer into bus space. The SG list
1091 * is built in the callback. If the bus dmamap load is not successful,
1092 * cmd->error_code will contain the error code and a 1 is returned.
1093 */
1094int
1095mrsas_map_request(struct mrsas_softc *sc,
1096 struct mrsas_mpt_cmd *cmd, union ccb *ccb)
1097{
1098 u_int32_t retcode = 0;
1099 struct cam_sim *sim;
1100
1101 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
1102
1103 if (cmd->data != NULL) {
1104 /* Map data buffer into bus space */
1105 mtx_lock(&sc->io_lock);
1106#if (__FreeBSD_version >= 902001)
1107 retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
1108 mrsas_data_load_cb, cmd, 0);
1109#else
1110 retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
1111 cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
1112#endif
1113 mtx_unlock(&sc->io_lock);
1114 if (retcode)
1115 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
1116 if (retcode == EINPROGRESS) {
1117 device_printf(sc->mrsas_dev, "request load in progress\n");
1118 mrsas_freeze_simq(cmd, sim);
1119 }
1120 }
1121 if (cmd->error_code)
1122 return (1);
1123 return (retcode);
1124}
1125
1126/*
1127 * mrsas_unmap_request: Unmap and unload data
1128 * input: Adapter instance soft state
1129 * Pointer to command packet
1130 *
1131 * This function unmaps and unloads data from OS.
1132 */
1133void
1134mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1135{
1136 if (cmd->data != NULL) {
1137 if (cmd->flags & MRSAS_DIR_IN)
1138 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
1139 if (cmd->flags & MRSAS_DIR_OUT)
1140 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
1141 mtx_lock(&sc->io_lock);
1142 bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
1143 mtx_unlock(&sc->io_lock);
1144 }
1145}
1146
1147/*
1148 * mrsas_data_load_cb: Callback entry point
1149 * input: Pointer to command packet as argument
1150 * Pointer to segment
1151 * Number of segments Error
1152 *
1153 * This is the callback function of the bus dma map load. It builds the SG
1154 * list.
1155 */
1156static void
1157mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1158{
1159 struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
1160 struct mrsas_softc *sc = cmd->sc;
1161 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1162 pMpi25IeeeSgeChain64_t sgl_ptr;
1163 int i = 0, sg_processed = 0;
1164
1165 if (error) {
1166 cmd->error_code = error;
1167 device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
1168 if (error == EFBIG) {
1169 cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
1170 return;
1171 }
1172 }
1173 if (cmd->flags & MRSAS_DIR_IN)
1174 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1175 BUS_DMASYNC_PREREAD);
1176 if (cmd->flags & MRSAS_DIR_OUT)
1177 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1178 BUS_DMASYNC_PREWRITE);
1179 if (nseg > sc->max_num_sge) {
1180 device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
1181 return;
1182 }
1183 io_request = cmd->io_request;
1184 sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
1185
1180 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
1186 if ((sc->device_id == MRSAS_INVADER) ||
1187 (sc->device_id == MRSAS_FURY) ||
1188 (sc->device_id == MRSAS_INTRUDER) ||
1189 (sc->device_id == MRSAS_INTRUDER_24)) {
1190 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1191
1192 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1193 sgl_ptr_end->Flags = 0;
1194 }
1195 if (nseg != 0) {
1196 for (i = 0; i < nseg; i++) {
1197 sgl_ptr->Address = segs[i].ds_addr;
1198 sgl_ptr->Length = segs[i].ds_len;
1199 sgl_ptr->Flags = 0;
1191 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
1200 if ((sc->device_id == MRSAS_INVADER) ||
1201 (sc->device_id == MRSAS_FURY) ||
1202 (sc->device_id == MRSAS_INTRUDER) ||
1203 (sc->device_id == MRSAS_INTRUDER_24)) {
1204 if (i == nseg - 1)
1205 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1206 }
1207 sgl_ptr++;
1208 sg_processed = i + 1;
1209 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1210 (nseg > sc->max_sge_in_main_msg)) {
1211 pMpi25IeeeSgeChain64_t sg_chain;
1212
1201 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
1213 if ((sc->device_id == MRSAS_INVADER) ||
1214 (sc->device_id == MRSAS_FURY) ||
1215 (sc->device_id == MRSAS_INTRUDER) ||
1216 (sc->device_id == MRSAS_INTRUDER_24)) {
1217 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1218 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1219 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1220 else
1221 cmd->io_request->ChainOffset = 0;
1222 } else
1223 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1224 sg_chain = sgl_ptr;
1210 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
1225 if ((sc->device_id == MRSAS_INVADER) ||
1226 (sc->device_id == MRSAS_FURY) ||
1227 (sc->device_id == MRSAS_INTRUDER) ||
1228 (sc->device_id == MRSAS_INTRUDER_24))
1229 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1230 else
1231 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1232 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
1233 sg_chain->Address = cmd->chain_frame_phys_addr;
1234 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
1235 }
1236 }
1237 }
1238 cmd->sge_count = nseg;
1239}
1240
1241/*
1242 * mrsas_freeze_simq: Freeze SIM queue
1243 * input: Pointer to command packet
1244 * Pointer to SIM
1245 *
1246 * This function freezes the sim queue.
1247 */
1248static void
1249mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
1250{
1251 union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
1252
1253 xpt_freeze_simq(sim, 1);
1254 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1255 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1256}
1257
1258void
1259mrsas_xpt_freeze(struct mrsas_softc *sc)
1260{
1261 xpt_freeze_simq(sc->sim_0, 1);
1262 xpt_freeze_simq(sc->sim_1, 1);
1263}
1264
1265void
1266mrsas_xpt_release(struct mrsas_softc *sc)
1267{
1268 xpt_release_simq(sc->sim_0, 1);
1269 xpt_release_simq(sc->sim_1, 1);
1270}
1271
1272/*
1273 * mrsas_cmd_done: Perform remaining command completion
1274 * input: Adapter instance soft state Pointer to command packet
1275 *
1276 * This function calls ummap request and releases the MPT command.
1277 */
1278void
1279mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1280{
1281 callout_stop(&cmd->cm_callout);
1282 mrsas_unmap_request(sc, cmd);
1283 mtx_lock(&sc->sim_lock);
1284 xpt_done(cmd->ccb_ptr);
1285 cmd->ccb_ptr = NULL;
1286 mtx_unlock(&sc->sim_lock);
1287 mrsas_release_mpt_cmd(cmd);
1288}
1289
/*
 * mrsas_cam_poll:	Polling entry point
 * input:		Pointer to SIM
 *
 * Drives command completion by invoking the interrupt handler directly.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	mrsas_isr((void *)(struct mrsas_softc *)cam_sim_softc(sim));
}
1303
1304/*
1305 * mrsas_bus_scan: Perform bus scan
1306 * input: Adapter instance soft state
1307 *
1308 * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not
1309 * be called in FreeBSD 8.x and later versions, where the bus scan is
1310 * automatic.
1311 */
1312int
1313mrsas_bus_scan(struct mrsas_softc *sc)
1314{
1315 union ccb *ccb_0;
1316 union ccb *ccb_1;
1317
1318 if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
1319 return (ENOMEM);
1320 }
1321 if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
1322 xpt_free_ccb(ccb_0);
1323 return (ENOMEM);
1324 }
1325 mtx_lock(&sc->sim_lock);
1326 if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
1327 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1328 xpt_free_ccb(ccb_0);
1329 xpt_free_ccb(ccb_1);
1330 mtx_unlock(&sc->sim_lock);
1331 return (EIO);
1332 }
1333 if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
1334 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1335 xpt_free_ccb(ccb_0);
1336 xpt_free_ccb(ccb_1);
1337 mtx_unlock(&sc->sim_lock);
1338 return (EIO);
1339 }
1340 mtx_unlock(&sc->sim_lock);
1341 xpt_rescan(ccb_0);
1342 xpt_rescan(ccb_1);
1343
1344 return (0);
1345}
1346
1347/*
1348 * mrsas_bus_scan_sim: Perform bus scan per SIM
1349 * input: adapter instance soft state
1350 *
1351 * This function will be called from Event handler on LD creation/deletion,
1352 * JBOD on/off.
1353 */
1354int
1355mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
1356{
1357 union ccb *ccb;
1358
1359 if ((ccb = xpt_alloc_ccb()) == NULL) {
1360 return (ENOMEM);
1361 }
1362 mtx_lock(&sc->sim_lock);
1363 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
1364 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1365 xpt_free_ccb(ccb);
1366 mtx_unlock(&sc->sim_lock);
1367 return (EIO);
1368 }
1369 mtx_unlock(&sc->sim_lock);
1370 xpt_rescan(ccb);
1371
1372 return (0);
1373}