/*
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
33 * 34 * $FreeBSD: stable/11/sys/dev/tws/tws_user.c 342970 2019-01-12 17:00:54Z markj $ 35 */ 36 37#include <dev/tws/tws.h> 38#include <dev/tws/tws_services.h> 39#include <dev/tws/tws_hdm.h> 40#include <dev/tws/tws_user.h> 41 42 43int tws_ioctl(struct cdev *dev, long unsigned int cmd, caddr_t buf, int flags, 44 struct thread *td); 45void tws_passthru_complete(struct tws_request *req); 46extern void tws_circular_aenq_insert(struct tws_softc *sc, 47 struct tws_circular_q *cq, struct tws_event_packet *aen); 48 49 50static int tws_passthru(struct tws_softc *sc, void *buf); 51static int tws_ioctl_aen(struct tws_softc *sc, u_long cmd, void *buf); 52 53extern int tws_bus_scan(struct tws_softc *sc); 54extern struct tws_request *tws_get_request(struct tws_softc *sc, 55 u_int16_t type); 56extern int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req); 57extern void tws_unmap_request(struct tws_softc *sc, struct tws_request *req); 58extern uint8_t tws_get_state(struct tws_softc *sc); 59extern void tws_timeout(void *arg); 60 61int 62tws_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, int flags, 63 struct thread *td) 64{ 65 struct tws_softc *sc = (struct tws_softc *)(dev->si_drv1); 66 int error; 67 68 TWS_TRACE_DEBUG(sc, "entry", sc, cmd); 69 sc->stats.ioctls++; 70 switch(cmd) { 71 case TWS_IOCTL_FIRMWARE_PASS_THROUGH : 72 error = tws_passthru(sc, (void *)buf); 73 break; 74 case TWS_IOCTL_SCAN_BUS : 75 TWS_TRACE_DEBUG(sc, "scan-bus", 0, 0); 76 error = tws_bus_scan(sc); 77 break; 78 default : 79 TWS_TRACE_DEBUG(sc, "ioctl-aen", cmd, buf); 80 error = tws_ioctl_aen(sc, cmd, (void *)buf); 81 break; 82 83 } 84 return(error); 85} 86 87static int 88tws_passthru(struct tws_softc *sc, void *buf) 89{ 90 struct tws_request *req; 91 struct tws_ioctl_no_data_buf *ubuf = (struct tws_ioctl_no_data_buf *)buf; 92 int error; 93 u_int32_t buffer_length; 94 u_int16_t lun4; 95 96 buffer_length = roundup2(ubuf->driver_pkt.buffer_length, 512); 97 if ( buffer_length > TWS_MAX_IO_SIZE 
) { 98 return(EINVAL); 99 } 100 if ( tws_get_state(sc) != TWS_ONLINE) { 101 return(EBUSY); 102 } 103 104 //============================================================================================== 105 // Get a command 106 // 107 do { 108 req = tws_get_request(sc, TWS_REQ_TYPE_PASSTHRU); 109 if ( !req ) { 110 error = tsleep(sc, 0, "tws_sleep", TWS_IOCTL_TIMEOUT*hz); 111 if ( error == EWOULDBLOCK ) { 112 return(ETIMEDOUT); 113 } 114 } else { 115 // Make sure we are still ready for new commands... 116 if ( tws_get_state(sc) != TWS_ONLINE) { 117 return(EBUSY); 118 } 119 break; 120 } 121 } while(1); 122 123 req->length = buffer_length; 124 TWS_TRACE_DEBUG(sc, "datal,rid", req->length, req->request_id); 125 if ( req->length ) { 126 req->data = sc->ioctl_data_mem; 127 req->dma_map = sc->ioctl_data_map; 128 129 //========================================================================================== 130 // Copy data in from user space 131 // 132 error = copyin(ubuf->pdata, req->data, req->length); 133 } 134 135 //============================================================================================== 136 // Set command fields 137 // 138 req->flags = TWS_DIR_IN | TWS_DIR_OUT; 139 req->cb = tws_passthru_complete; 140 141 memcpy(&req->cmd_pkt->cmd, &ubuf->cmd_pkt.cmd, 142 sizeof(struct tws_command_apache)); 143 144 if ( GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) == 145 TWS_FW_CMD_EXECUTE_SCSI ) { 146 lun4 = req->cmd_pkt->cmd.pkt_a.lun_l4__req_id & 0xF000; 147 req->cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun4 | req->request_id; 148 } else { 149 req->cmd_pkt->cmd.pkt_g.generic.request_id = (u_int8_t) req->request_id; 150 } 151 152 //============================================================================================== 153 // Send command to controller 154 // 155 error = tws_map_request(sc, req); 156 if (error) { 157 ubuf->driver_pkt.os_status = error; 158 goto out_data; 159 } 160 161 if ( req->state == TWS_REQ_STATE_COMPLETE ) { 162 
ubuf->driver_pkt.os_status = req->error_code; 163 goto out_unmap; 164 } 165 166 mtx_lock(&sc->gen_lock); 167 error = mtx_sleep(req, &sc->gen_lock, 0, "tws_passthru", TWS_IOCTL_TIMEOUT*hz); 168 mtx_unlock(&sc->gen_lock); 169 if (( req->state != TWS_REQ_STATE_COMPLETE ) && ( error == EWOULDBLOCK )) { 170 TWS_TRACE_DEBUG(sc, "msleep timeout", error, req->request_id); 171 tws_timeout((void*) req); 172 } 173 174out_unmap: 175 if ( req->error_code == TWS_REQ_RET_RESET ) { 176 error = EBUSY; 177 req->error_code = EBUSY; 178 TWS_TRACE_DEBUG(sc, "ioctl reset", error, req->request_id); 179 } 180 181 tws_unmap_request(sc, req); 182 183 //============================================================================================== 184 // Return command status to user space 185 // 186 memcpy(&ubuf->cmd_pkt.hdr, &req->cmd_pkt->hdr, sizeof(struct tws_command_apache)); 187 memcpy(&ubuf->cmd_pkt.cmd, &req->cmd_pkt->cmd, sizeof(struct tws_command_apache)); 188 189out_data: 190 if ( req->length ) { 191 //========================================================================================== 192 // Copy data out to user space 193 // 194 if ( !error ) 195 error = copyout(req->data, ubuf->pdata, ubuf->driver_pkt.buffer_length); 196 } 197 198 if ( error ) 199 TWS_TRACE_DEBUG(sc, "errored", error, 0); 200 201 if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS ) 202 ubuf->driver_pkt.os_status = error; 203 204 //============================================================================================== 205 // Free command 206 // 207 req->state = TWS_REQ_STATE_FREE; 208 209 wakeup_one(sc); 210 211 return(error); 212} 213 214void 215tws_passthru_complete(struct tws_request *req) 216{ 217 req->state = TWS_REQ_STATE_COMPLETE; 218 wakeup_one(req); 219 220} 221 222static void 223tws_retrive_aen(struct tws_softc *sc, u_long cmd, 224 struct tws_ioctl_packet *ubuf) 225{ 226 u_int16_t index=0; 227 struct tws_event_packet eventp, *qp; 228 229 if ( sc->aen_q.head == sc->aen_q.tail ) { 230 
ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS; 231 return; 232 } 233 234 ubuf->driver_pkt.status = 0; 235 236 /* 237 * once this flag is set cli will not display alarms 238 * needs a revisit from tools? 239 */ 240 if ( sc->aen_q.overflow ) { 241 ubuf->driver_pkt.status = TWS_AEN_OVERFLOW; 242 sc->aen_q.overflow = 0; /* reset */ 243 } 244 245 qp = (struct tws_event_packet *)sc->aen_q.q; 246 247 switch (cmd) { 248 case TWS_IOCTL_GET_FIRST_EVENT : 249 index = sc->aen_q.head; 250 break; 251 case TWS_IOCTL_GET_LAST_EVENT : 252 /* index = tail-1 */ 253 index = (sc->aen_q.depth + sc->aen_q.tail - 1) % sc->aen_q.depth; 254 break; 255 case TWS_IOCTL_GET_NEXT_EVENT : 256 memcpy(&eventp, ubuf->data_buf, sizeof(struct tws_event_packet)); 257 index = sc->aen_q.head; 258 do { 259 if ( qp[index].sequence_id == 260 (eventp.sequence_id + 1) ) 261 break; 262 index = (index+1) % sc->aen_q.depth; 263 }while ( index != sc->aen_q.tail ); 264 if ( index == sc->aen_q.tail ) { 265 ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS; 266 return; 267 } 268 break; 269 case TWS_IOCTL_GET_PREVIOUS_EVENT : 270 memcpy(&eventp, ubuf->data_buf, sizeof(struct tws_event_packet)); 271 index = sc->aen_q.head; 272 do { 273 if ( qp[index].sequence_id == 274 (eventp.sequence_id - 1) ) 275 break; 276 index = (index+1) % sc->aen_q.depth; 277 }while ( index != sc->aen_q.tail ); 278 if ( index == sc->aen_q.tail ) { 279 ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS; 280 return; 281 } 282 break; 283 default : 284 TWS_TRACE_DEBUG(sc, "not a valid event", sc, cmd); 285 ubuf->driver_pkt.status = TWS_AEN_NO_EVENTS; 286 return; 287 } 288 289 memcpy(ubuf->data_buf, &qp[index], 290 sizeof(struct tws_event_packet)); 291 qp[index].retrieved = TWS_AEN_RETRIEVED; 292 293 return; 294 295} 296 297static int 298tws_ioctl_aen(struct tws_softc *sc, u_long cmd, void *buf) 299{ 300 301 struct tws_ioctl_packet *ubuf = (struct tws_ioctl_packet *)buf; 302 struct tws_compatibility_packet cpkt; 303 struct tws_lock_packet lpkt; 304 time_t ctime; 
305 306 mtx_lock(&sc->gen_lock); 307 ubuf->driver_pkt.status = 0; 308 switch(cmd) { 309 case TWS_IOCTL_GET_FIRST_EVENT : 310 case TWS_IOCTL_GET_LAST_EVENT : 311 case TWS_IOCTL_GET_NEXT_EVENT : 312 case TWS_IOCTL_GET_PREVIOUS_EVENT : 313 tws_retrive_aen(sc,cmd,ubuf); 314 break; 315 case TWS_IOCTL_GET_LOCK : 316 ctime = TWS_LOCAL_TIME; 317 memcpy(&lpkt, ubuf->data_buf, sizeof(struct tws_lock_packet)); 318 if ( (sc->ioctl_lock.lock == TWS_IOCTL_LOCK_FREE) || 319 (lpkt.force_flag) || 320 (ctime >= sc->ioctl_lock.timeout) ) { 321 sc->ioctl_lock.lock = TWS_IOCTL_LOCK_HELD; 322 sc->ioctl_lock.timeout = ctime + (lpkt.timeout_msec / 1000); 323 lpkt.time_remaining_msec = lpkt.timeout_msec; 324 } else { 325 lpkt.time_remaining_msec = (u_int32_t) 326 ((sc->ioctl_lock.timeout - ctime) * 1000); 327 ubuf->driver_pkt.status = TWS_IOCTL_LOCK_ALREADY_HELD; 328 329 } 330 break; 331 case TWS_IOCTL_RELEASE_LOCK : 332 if (sc->ioctl_lock.lock == TWS_IOCTL_LOCK_FREE) { 333 ubuf->driver_pkt.status = TWS_IOCTL_LOCK_NOT_HELD; 334 } else { 335 sc->ioctl_lock.lock = TWS_IOCTL_LOCK_FREE; 336 ubuf->driver_pkt.status = 0; 337 } 338 break; 339 case TWS_IOCTL_GET_COMPATIBILITY_INFO : 340 TWS_TRACE_DEBUG(sc, "get comp info", sc, cmd); 341 342 memcpy( cpkt.driver_version, TWS_DRIVER_VERSION_STRING, 343 sizeof(TWS_DRIVER_VERSION_STRING)); 344 cpkt.working_srl = sc->cinfo.working_srl; 345 cpkt.working_branch = sc->cinfo.working_branch; 346 cpkt.working_build = sc->cinfo.working_build; 347 cpkt.driver_srl_high = TWS_CURRENT_FW_SRL; 348 cpkt.driver_branch_high = TWS_CURRENT_FW_BRANCH; 349 cpkt.driver_build_high = TWS_CURRENT_FW_BUILD; 350 cpkt.driver_srl_low = TWS_BASE_FW_SRL; 351 cpkt.driver_branch_low = TWS_BASE_FW_BRANCH; 352 cpkt.driver_build_low = TWS_BASE_FW_BUILD; 353 cpkt.fw_on_ctlr_srl = sc->cinfo.fw_on_ctlr_srl; 354 cpkt.fw_on_ctlr_branch = sc->cinfo.fw_on_ctlr_branch; 355 cpkt.fw_on_ctlr_build = sc->cinfo.fw_on_ctlr_build; 356 ubuf->driver_pkt.status = 0; 357 int len = sizeof(struct 
tws_compatibility_packet); 358 if ( ubuf->driver_pkt.buffer_length < len ) 359 len = ubuf->driver_pkt.buffer_length; 360 memcpy(ubuf->data_buf, &cpkt, len); 361 362 break; 363 default : 364 TWS_TRACE_DEBUG(sc, "not valid cmd", cmd, 365 TWS_IOCTL_GET_COMPATIBILITY_INFO); 366 break; 367 368 } 369 mtx_unlock(&sc->gen_lock); 370 return(SUCCESS); 371 372} 373 374void 375tws_circular_aenq_insert(struct tws_softc *sc, struct tws_circular_q *cq, 376struct tws_event_packet *aen) 377{ 378 379 struct tws_event_packet *q = (struct tws_event_packet *)cq->q; 380 volatile u_int16_t head, tail; 381 u_int8_t retr; 382 mtx_assert(&sc->gen_lock, MA_OWNED); 383 384 head = cq->head; 385 tail = cq->tail; 386 retr = q[tail].retrieved; 387 388 memcpy(&q[tail], aen, sizeof(struct tws_event_packet)); 389 tail = (tail+1) % cq->depth; 390 391 if ( head == tail ) { /* q is full */ 392 if ( retr != TWS_AEN_RETRIEVED ) 393 cq->overflow = 1; 394 cq->head = (head+1) % cq->depth; 395 } 396 cq->tail = tail; 397 398} 399