Directory: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/usb/gadget/

Lines Matching defs:fsg

76  * fsg->state; it won't try to stop the thread if the state is already
90 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
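The comment excerpts above describe the driver's ring of buffer heads: next_buffhd_to_fill names the head about to be filled and next_buffhd_to_drain the one about to be emptied, and the heads are chained into a circle at bind time (file line 3893 below closes the loop). A minimal standalone sketch of such a ring; the field set and the buffer count here are illustrative assumptions, since the real struct fsg_buffhd also carries USB request pointers:

enum buf_state { BUF_STATE_EMPTY, BUF_STATE_FULL, BUF_STATE_BUSY };

/* Illustrative stand-in for struct fsg_buffhd in the listing. */
struct buffhd {
	void		*buf;
	enum buf_state	state;
	struct buffhd	*next;		/* circular link */
};

#define NUM_BUFFERS	2		/* value assumed; the driver double-buffers */

static struct buffhd buffhds[NUM_BUFFERS];

static void init_buffhd_ring(void)
{
	int i;

	for (i = 0; i < NUM_BUFFERS - 1; ++i)
		buffhds[i].next = &buffhds[i + 1];
	buffhds[NUM_BUFFERS - 1].next = &buffhds[0];	/* close the ring */
}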
211 #define DBG(fsg,fmt,args...) \
212 xprintk(fsg , KERN_DEBUG , fmt , ## args)
218 #define DBG(fsg,fmt,args...) \
232 #define VDBG(fsg,fmt,args...) \
238 #define ERROR(fsg,fmt,args...) \
239 xprintk(fsg , KERN_ERR , fmt , ## args)
243 #define WARN(fsg,fmt,args...) \
244 xprintk(fsg , KERN_WARNING , fmt , ## args)
248 #define INFO(fsg,fmt,args...) \
249 xprintk(fsg , KERN_INFO , fmt , ## args)
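DBG appears twice above (file lines 211 and 218) because the driver defines a real variant when DEBUG is set and an empty one otherwise; VDBG, ERROR, WARN and INFO follow the same scheme at other log levels. A sketch of the usual pattern, assuming the xprintk() wrapper around dev_printk() used by the mainline driver; the helper itself is not part of this listing:

#include <linux/kernel.h>
#include <linux/device.h>

/* Assumed helper: route everything through dev_printk() on the
 * gadget's struct device. */
#define xprintk(f, level, fmt, args...) \
	dev_printk(level, &(f)->gadget->dev, fmt, ## args)

#ifdef DEBUG
#define DBG(fsg, fmt, args...)	xprintk(fsg, KERN_DEBUG, fmt, ## args)
#else
#define DBG(fsg, fmt, args...)	do { } while (0)	/* compiled out */
#endif

#ifdef VERBOSE_DEBUG	/* the verbose flag's name varies across versions */
#define VDBG	DBG
#else
#define VDBG(fsg, fmt, args...)	do { } while (0)
#endif

#define ERROR(fsg, fmt, args...)	xprintk(fsg, KERN_ERR, fmt, ## args)
#define WARN(fsg, fmt, args...)		xprintk(fsg, KERN_WARNING, fmt, ## args)
#define INFO(fsg, fmt, args...)		xprintk(fsg, KERN_INFO, fmt, ## args)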
628 static int inline exception_in_progress(struct fsg_dev *fsg)
630 return (fsg->state > FSG_STATE_IDLE);
634 static void inline set_bulk_out_req_length(struct fsg_dev *fsg,
640 rem = length % fsg->bulk_out_maxpacket;
642 length += fsg->bulk_out_maxpacket - rem;
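set_bulk_out_req_length() (file line 634) rounds the requested length up to a whole number of wMaxPacketSize packets, since the host may pad a bulk OUT transfer to a packet boundary. A standalone sketch of the modulo arithmetic shown above; the function name is made up for illustration:

#include <stdint.h>

/* Round a bulk-OUT request length up to a multiple of the endpoint's
 * maximum packet size, mirroring the arithmetic in the listing. */
static uint32_t round_up_to_maxpacket(uint32_t length, uint16_t maxpacket)
{
	uint32_t rem = length % maxpacket;

	if (rem > 0)
		length += maxpacket - rem;
	return length;
}

/* Example: round_up_to_maxpacket(31, 512) == 512. */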
650 static void close_all_backing_files(struct fsg_dev *fsg);
657 static void dump_msg(struct fsg_dev *fsg, const char *label,
665 DBG(fsg, "%s, length %u:\n", label, length);
685 static void inline dump_cdb(struct fsg_dev *fsg)
690 static void inline dump_msg(struct fsg_dev *fsg, const char *label,
694 static void inline dump_cdb(struct fsg_dev *fsg)
699 for (i = 0; i < fsg->cmnd_size; ++i)
700 sprintf(cmdbuf + i*3, " %02x", fsg->cmnd[i]);
701 VDBG(fsg, "SCSI CDB: %s\n", cmdbuf);
707 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
711 if (ep == fsg->bulk_in)
713 else if (ep == fsg->bulk_out)
717 DBG(fsg, "%s set halt\n", name);
1002 /* Caller must hold fsg->lock */
1003 static void wakeup_thread(struct fsg_dev *fsg)
1006 fsg->thread_wakeup_needed = 1;
1007 if (fsg->thread_task)
1008 wake_up_process(fsg->thread_task);
1012 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
1019 spin_lock_irqsave(&fsg->lock, flags);
1020 if (fsg->state <= new_state) {
1021 fsg->exception_req_tag = fsg->ep0_req_tag;
1022 fsg->state = new_state;
1023 if (fsg->thread_task)
1025 fsg->thread_task);
1027 spin_unlock_irqrestore(&fsg->lock, flags);
1040 struct fsg_dev *fsg = get_gadget_data(gadget);
1042 DBG(fsg, "disconnect or port reset\n");
1043 raise_exception(fsg, FSG_STATE_DISCONNECT);
1047 static int ep0_queue(struct fsg_dev *fsg)
1051 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
1055 WARN(fsg, "error in submission: %s --> %d\n",
1056 fsg->ep0->name, rc);
1063 struct fsg_dev *fsg = ep->driver_data;
1066 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
1068 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1074 ((fsg_routine_t) (req->context))(fsg);
1085 struct fsg_dev *fsg = ep->driver_data;
1089 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1096 spin_lock(&fsg->lock);
1099 wakeup_thread(fsg);
1100 spin_unlock(&fsg->lock);
1105 struct fsg_dev *fsg = ep->driver_data;
1108 dump_msg(fsg, "bulk-out", req->buf, req->actual);
1110 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1118 spin_lock(&fsg->lock);
1121 wakeup_thread(fsg);
1122 spin_unlock(&fsg->lock);
1129 struct fsg_dev *fsg = ep->driver_data;
1133 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
1140 spin_lock(&fsg->lock);
1141 fsg->intreq_busy = 0;
1143 wakeup_thread(fsg);
1144 spin_unlock(&fsg->lock);
1158 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1160 struct usb_request *req = fsg->ep0req;
1170 fsg_set_halt(fsg, fsg->ep0);
1181 DBG(fsg, "cbi reset request\n");
1182 raise_exception(fsg, FSG_STATE_RESET);
1186 VDBG(fsg, "CB[I] accept device-specific command\n");
1187 spin_lock(&fsg->lock);
1190 if (fsg->cbbuf_cmnd_size)
1191 WARN(fsg, "CB[I] overwriting previous command\n");
1192 fsg->cbbuf_cmnd_size = req->actual;
1193 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
1195 wakeup_thread(fsg);
1196 spin_unlock(&fsg->lock);
1200 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1205 static int class_setup_req(struct fsg_dev *fsg,
1208 struct usb_request *req = fsg->ep0req;
1213 if (!fsg->config)
1231 DBG(fsg, "bulk reset request\n");
1232 raise_exception(fsg, FSG_STATE_RESET);
1244 VDBG(fsg, "get max LUN\n");
1245 *(u8 *) req->buf = fsg->nluns - 1;
1268 fsg->ep0req->context = received_cbi_adsc;
1274 VDBG(fsg,
1287 static int standard_setup_req(struct fsg_dev *fsg,
1290 struct usb_request *req = fsg->ep0req;
1306 VDBG(fsg, "get device descriptor\n");
1312 VDBG(fsg, "get device qualifier\n");
1313 if (!fsg->gadget->is_dualspeed)
1320 VDBG(fsg, "get other-speed config descriptor\n");
1321 if (!fsg->gadget->is_dualspeed)
1326 VDBG(fsg, "get configuration descriptor\n");
1330 value = populate_config_buf(fsg->gadget,
1337 VDBG(fsg, "get string descriptor\n");
1351 VDBG(fsg, "set configuration\n");
1353 fsg->new_config = w_value;
1357 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
1365 VDBG(fsg, "get configuration\n");
1366 *(u8 *) req->buf = fsg->config;
1374 if (fsg->config && w_index == 0) {
1379 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
1387 if (!fsg->config)
1393 VDBG(fsg, "get interface\n");
1399 VDBG(fsg,
1412 struct fsg_dev *fsg = get_gadget_data(gadget);
1416 ++fsg->ep0_req_tag; // Record arrival of a new request
1417 fsg->ep0req->context = NULL;
1418 fsg->ep0req->length = 0;
1419 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
1422 rc = class_setup_req(fsg, ctrl);
1424 rc = standard_setup_req(fsg, ctrl);
1429 fsg->ep0req->length = rc;
1430 fsg->ep0req->zero = rc < w_length;
1431 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
1433 rc = ep0_queue(fsg);
1447 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1453 if (ep == fsg->bulk_in)
1454 dump_msg(fsg, "bulk-in", req->buf, req->length);
1455 else if (ep == fsg->intr_in)
1456 dump_msg(fsg, "intr-in", req->buf, req->length);
1458 spin_lock_irq(&fsg->lock);
1461 spin_unlock_irq(&fsg->lock);
1473 WARN(fsg, "error in submission: %s --> %d\n",
1479 static int sleep_thread(struct fsg_dev *fsg)
1491 if (fsg->thread_wakeup_needed)
1496 fsg->thread_wakeup_needed = 0;
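wakeup_thread() (file line 1003) and sleep_thread() (file line 1479) implement the driver's wake handshake: completion callbacks set thread_wakeup_needed under the device lock and call wake_up_process(), while the worker thread marks itself interruptible and schedules until the flag appears. A condensed sketch of that handshake; the struct and helper names are invented for illustration and the signal handling is reduced to a single check:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct worker {
	spinlock_t		lock;
	int			thread_wakeup_needed;
	struct task_struct	*thread_task;
};

/* Caller must hold w->lock, as with wakeup_thread() in the listing. */
static void wakeup_worker(struct worker *w)
{
	w->thread_wakeup_needed = 1;
	if (w->thread_task)
		wake_up_process(w->thread_task);
}

static int sleep_worker(struct worker *w)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			return -EINTR;
		}
		if (w->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	w->thread_wakeup_needed = 0;
	return 0;
}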
1503 static int do_read(struct fsg_dev *fsg)
1505 struct lun *curlun = fsg->curlun;
1517 if (fsg->cmnd[0] == SC_READ_6)
1518 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
1520 lba = get_be32(&fsg->cmnd[2]);
1525 if ((fsg->cmnd[1] & ~0x18) != 0) {
1537 amount_left = fsg->data_size_from_cmnd;
1560 bh = fsg->next_buffhd_to_fill;
1562 if ((rc = sleep_thread(fsg)) != 0)
1600 fsg->residue -= nread;
1617 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1619 fsg->next_buffhd_to_fill = bh->next;
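do_read() (file line 1503) decodes the LBA by opcode: READ(6) packs a 21-bit LBA into CDB bytes 1-3 (the LUN bits of byte 1 have already been masked off by check_command(), file line 2640), while READ(10) and READ(12) carry a 32-bit big-endian LBA starting at byte 2. A standalone sketch of that parsing in plain C, without the driver's get_be16()/get_be32() helpers; the explicit 0x1f mask keeps the sketch self-contained:

#include <stdint.h>

#define SC_READ_6	0x08		/* standard SCSI opcodes */
#define SC_READ_10	0x28

static uint32_t read_lba_from_cdb(const uint8_t *cmnd)
{
	if (cmnd[0] == SC_READ_6)
		/* 21-bit LBA: low 5 bits of byte 1, then bytes 2-3 */
		return ((uint32_t)(cmnd[1] & 0x1f) << 16) |
		       ((uint32_t)cmnd[2] << 8) | cmnd[3];

	/* READ(10)/READ(12): 32-bit big-endian LBA in bytes 2-5 */
	return ((uint32_t)cmnd[2] << 24) | ((uint32_t)cmnd[3] << 16) |
	       ((uint32_t)cmnd[4] << 8) | cmnd[5];
}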
1628 static int do_write(struct fsg_dev *fsg)
1630 struct lun *curlun = fsg->curlun;
1649 if (fsg->cmnd[0] == SC_WRITE_6)
1650 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
1652 lba = get_be32(&fsg->cmnd[2]);
1658 if ((fsg->cmnd[1] & ~0x18) != 0) {
1662 if (fsg->cmnd[1] & 0x08) // FUA
1673 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1678 bh = fsg->next_buffhd_to_fill;
1717 fsg->usb_amount_left -= amount;
1727 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1729 fsg->next_buffhd_to_fill = bh->next;
1734 bh = fsg->next_buffhd_to_drain;
1739 fsg->next_buffhd_to_drain = bh->next;
1782 fsg->residue -= nwritten;
1794 fsg->short_packet_received = 1;
1801 if ((rc = sleep_thread(fsg)) != 0)
1838 static void fsync_all(struct fsg_dev *fsg)
1842 for (i = 0; i < fsg->nluns; ++i)
1843 fsync_sub(&fsg->luns[i]);
1846 static int do_synchronize_cache(struct fsg_dev *fsg)
1848 struct lun *curlun = fsg->curlun;
1872 static int do_verify(struct fsg_dev *fsg)
1874 struct lun *curlun = fsg->curlun;
1877 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1885 lba = get_be32(&fsg->cmnd[2]);
1893 if ((fsg->cmnd[1] & ~0x10) != 0) {
1898 verification_length = get_be16(&fsg->cmnd[7]);
1970 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1977 if (!fsg->curlun) { // Unsupported LUNs are okay
1978 fsg->bad_lun_okay = 1;
1997 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1999 struct lun *curlun = fsg->curlun;
2021 fsg->bad_lun_okay = 1;
2045 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2047 struct lun *curlun = fsg->curlun;
2048 u32 lba = get_be32(&fsg->cmnd[2]);
2049 int pmi = fsg->cmnd[8];
2064 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2066 struct lun *curlun = fsg->curlun;
2067 int mscmnd = fsg->cmnd[0];
2075 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
2079 pc = fsg->cmnd[2] >> 6;
2080 page_code = fsg->cmnd[2] & 0x3f;
2142 static int do_start_stop(struct fsg_dev *fsg)
2144 struct lun *curlun = fsg->curlun;
2152 // int immed = fsg->cmnd[1] & 0x01;
2153 loej = fsg->cmnd[4] & 0x02;
2154 start = fsg->cmnd[4] & 0x01;
2157 if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed
2158 (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start
2172 up_read(&fsg->filesem);
2173 down_write(&fsg->filesem);
2175 up_write(&fsg->filesem);
2176 down_read(&fsg->filesem);
2192 static int do_prevent_allow(struct fsg_dev *fsg)
2194 struct lun *curlun = fsg->curlun;
2202 prevent = fsg->cmnd[4] & 0x01;
2203 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
2215 static int do_read_format_capacities(struct fsg_dev *fsg,
2218 struct lun *curlun = fsg->curlun;
2232 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2234 struct lun *curlun = fsg->curlun;
2244 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
2248 rc = fsg_set_halt(fsg, fsg->bulk_in);
2250 VDBG(fsg, "delayed bulk-in endpoint halt\n");
2253 WARN(fsg, "usb_ep_set_halt -> %d\n", rc);
2261 rc = usb_ep_set_halt(fsg->bulk_in);
2266 static int pad_with_zeros(struct fsg_dev *fsg)
2268 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2274 fsg->usb_amount_left = nkeep + fsg->residue;
2275 while (fsg->usb_amount_left > 0) {
2279 if ((rc = sleep_thread(fsg)) != 0)
2283 nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen);
2287 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2289 bh = fsg->next_buffhd_to_fill = bh->next;
2290 fsg->usb_amount_left -= nsend;
2296 static int throw_away_data(struct fsg_dev *fsg)
2302 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
2303 fsg->usb_amount_left > 0) {
2309 fsg->next_buffhd_to_drain = bh->next;
2314 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2321 bh = fsg->next_buffhd_to_fill;
2322 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
2323 amount = min(fsg->usb_amount_left,
2331 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2333 fsg->next_buffhd_to_fill = bh->next;
2334 fsg->usb_amount_left -= amount;
2339 if ((rc = sleep_thread(fsg)) != 0)
2346 static int finish_reply(struct fsg_dev *fsg)
2348 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2351 switch (fsg->data_dir) {
2361 fsg_set_halt(fsg, fsg->bulk_out);
2362 rc = halt_bulk_in_endpoint(fsg);
2368 if (fsg->data_size == 0)
2372 else if (fsg->residue == 0) {
2374 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2376 fsg->next_buffhd_to_fill = bh->next;
2387 fsg->residue == fsg->data_size &&
2388 (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
2390 rc = halt_bulk_in_endpoint(fsg);
2393 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2395 fsg->next_buffhd_to_fill = bh->next;
2405 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2407 fsg->next_buffhd_to_fill = bh->next;
2408 rc = halt_bulk_in_endpoint(fsg);
2410 rc = pad_with_zeros(fsg);
2417 if (fsg->residue == 0)
2421 else if (fsg->short_packet_received) {
2422 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2436 rc = throw_away_data(fsg);
2443 static int send_status(struct fsg_dev *fsg)
2445 struct lun *curlun = fsg->curlun;
2452 bh = fsg->next_buffhd_to_fill;
2454 if ((rc = sleep_thread(fsg)) != 0)
2461 } else if (fsg->bad_lun_okay)
2466 if (fsg->phase_error) {
2467 DBG(fsg, "sending phase-error status\n");
2471 DBG(fsg, "sending command-failure status\n");
2473 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2483 csw->Tag = fsg->tag;
2484 csw->Residue = cpu_to_le32(fsg->residue);
2489 start_transfer(fsg, fsg->bulk_in, bh->inreq,
2510 fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
2512 fsg->intr_buffhd = bh; // Point to the right buffhd
2513 fsg->intreq->buf = bh->inreq->buf;
2514 fsg->intreq->dma = bh->inreq->dma;
2515 fsg->intreq->context = bh;
2516 start_transfer(fsg, fsg->intr_in, fsg->intreq,
2517 &fsg->intreq_busy, &bh->state);
2520 fsg->next_buffhd_to_fill = bh->next;
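send_status() (file line 2443) closes every Bulk-Only command with a 13-byte Command Status Wrapper that echoes the CBW tag and reports the residue, the bytes the host asked for but never got. A sketch of the wrapper as the Bulk-Only Transport specification lays it out; field and constant names follow the spec, and byte order is assumed little-endian here where the driver uses cpu_to_le32() (file line 2484):

#include <stdint.h>

#define USB_BULK_CS_SIG		0x53425355	/* "USBS", little-endian */
#define USB_STATUS_PASS		0
#define USB_STATUS_FAIL		1
#define USB_STATUS_PHASE_ERROR	2

struct bulk_cs_wrap {
	uint32_t	Signature;	/* dCSWSignature */
	uint32_t	Tag;		/* echoed from the matching CBW */
	uint32_t	Residue;	/* bytes expected but not transferred */
	uint8_t		Status;		/* pass / fail / phase error */
} __attribute__((packed));		/* 13 bytes on the wire */

static void fill_csw(struct bulk_cs_wrap *csw, uint32_t tag,
		     uint32_t residue, uint8_t status)
{
	/* Little-endian host assumed; in-kernel this is cpu_to_le32(). */
	csw->Signature = USB_BULK_CS_SIG;
	csw->Tag = tag;
	csw->Residue = residue;
	csw->Status = status;
}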
2529 static int check_command(struct fsg_dev *fsg, int cmnd_size,
2534 int lun = fsg->cmnd[1] >> 5;
2547 if (fsg->cmnd_size == 12)
2555 if (fsg->data_dir != DATA_DIR_UNKNOWN)
2556 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
2557 fsg->data_size);
2558 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2560 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
2564 if (fsg->data_size_from_cmnd == 0)
2566 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
2567 fsg->data_dir = data_dir;
2568 fsg->data_size = fsg->data_size_from_cmnd;
2571 if (fsg->data_size < fsg->data_size_from_cmnd) {
2576 fsg->data_size_from_cmnd = fsg->data_size;
2577 fsg->phase_error = 1;
2580 fsg->residue = fsg->usb_amount_left = fsg->data_size;
2583 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
2584 fsg->phase_error = 1;
2589 if (cmnd_size != fsg->cmnd_size) {
2591 if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
2592 cmnd_size = fsg->cmnd_size;
2594 fsg->phase_error = 1;
2601 if (fsg->lun != lun)
2602 DBG(fsg, "using LUN %d from CBW, "
2604 fsg->lun, lun);
2606 fsg->lun = lun; // Use LUN from the command
2609 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
2610 fsg->curlun = curlun = &fsg->luns[fsg->lun];
2611 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
2617 fsg->curlun = curlun = NULL;
2618 fsg->bad_lun_okay = 0;
2622 if (fsg->cmnd[0] != SC_INQUIRY &&
2623 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2624 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2632 fsg->cmnd[0] != SC_INQUIRY &&
2633 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2640 fsg->cmnd[1] &= 0x1f; // Mask away the LUN
2642 if (fsg->cmnd[i] && !(mask & (1 << i))) {
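check_command() (file line 2529) still honors the SCSI-2 convention of a LUN carried in the top three bits of CDB byte 1: it extracts the value, warns if it disagrees with the LUN named by the transport, and masks the bits away (file line 2640) so later checks see a clean byte. A small sketch of that extraction and masking; the function name is invented for illustration:

#include <stdint.h>

/* SCSI-2 style LUN handling as in check_command(): the LUN sits in
 * bits 7..5 of CDB byte 1 and is masked off after extraction. */
static unsigned int cdb_take_lun(uint8_t *cmnd)
{
	unsigned int lun = cmnd[1] >> 5;

	cmnd[1] &= 0x1f;	/* mask away the LUN for later checks */
	return lun;
}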
2660 static int do_scsi_command(struct fsg_dev *fsg)
2668 dump_cdb(fsg);
2671 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2673 if ((rc = sleep_thread(fsg)) != 0)
2676 fsg->phase_error = 0;
2677 fsg->short_packet_received = 0;
2679 down_read(&fsg->filesem); // We're using the backing file
2680 switch (fsg->cmnd[0]) {
2683 fsg->data_size_from_cmnd = fsg->cmnd[4];
2684 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2687 reply = do_inquiry(fsg, bh);
2691 fsg->data_size_from_cmnd = fsg->cmnd[4];
2692 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2695 reply = do_mode_select(fsg, bh);
2699 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2700 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2703 reply = do_mode_select(fsg, bh);
2707 fsg->data_size_from_cmnd = fsg->cmnd[4];
2708 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2711 reply = do_mode_sense(fsg, bh);
2715 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2716 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2719 reply = do_mode_sense(fsg, bh);
2723 fsg->data_size_from_cmnd = 0;
2724 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2727 reply = do_prevent_allow(fsg);
2731 i = fsg->cmnd[4];
2732 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2733 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2736 reply = do_read(fsg);
2740 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
2741 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2744 reply = do_read(fsg);
2748 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
2749 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2752 reply = do_read(fsg);
2756 fsg->data_size_from_cmnd = 8;
2757 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2760 reply = do_read_capacity(fsg, bh);
2764 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2765 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2768 reply = do_read_format_capacities(fsg, bh);
2772 fsg->data_size_from_cmnd = fsg->cmnd[4];
2773 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2776 reply = do_request_sense(fsg, bh);
2780 fsg->data_size_from_cmnd = 0;
2781 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2784 reply = do_start_stop(fsg);
2788 fsg->data_size_from_cmnd = 0;
2789 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2792 reply = do_synchronize_cache(fsg);
2796 fsg->data_size_from_cmnd = 0;
2797 reply = check_command(fsg, 6, DATA_DIR_NONE,
2805 fsg->data_size_from_cmnd = 0;
2806 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2809 reply = do_verify(fsg);
2813 i = fsg->cmnd[4];
2814 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2815 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2818 reply = do_write(fsg);
2822 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
2823 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2826 reply = do_write(fsg);
2830 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
2831 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
2834 reply = do_write(fsg);
2848 fsg->data_size_from_cmnd = 0;
2849 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2850 if ((reply = check_command(fsg, fsg->cmnd_size,
2852 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2857 up_read(&fsg->filesem);
2865 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2866 reply = min((u32) reply, fsg->data_size_from_cmnd);
2869 fsg->residue -= reply;
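One detail worth noting in the dispatch above: for READ(6) and WRITE(6) the transfer length is a single CDB byte in which zero means 256 blocks, and the driver converts blocks to bytes with a shift by 9 for 512-byte blocks (file lines 2731-2732 and 2813-2814). A standalone sketch of that computation; the function name is made up for illustration:

#include <stdint.h>

/* READ(6)/WRITE(6) transfer size: a length byte of 0 means 256 blocks,
 * and each block is 512 bytes (hence the << 9). */
static uint32_t rw6_transfer_bytes(uint8_t length_byte)
{
	uint32_t blocks = (length_byte == 0) ? 256 : length_byte;

	return blocks << 9;
}

/* Example: rw6_transfer_bytes(0) == 131072 (256 * 512). */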
2878 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2891 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2899 set_bit(CLEAR_BULK_HALTS, &fsg->atomic_bitflags);
2900 fsg_set_halt(fsg, fsg->bulk_out);
2901 halt_bulk_in_endpoint(fsg);
2908 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2915 fsg_set_halt(fsg, fsg->bulk_out);
2916 halt_bulk_in_endpoint(fsg);
2922 fsg->cmnd_size = cbw->Length;
2923 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2925 fsg->data_dir = DATA_DIR_TO_HOST;
2927 fsg->data_dir = DATA_DIR_FROM_HOST;
2928 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2929 if (fsg->data_size == 0)
2930 fsg->data_dir = DATA_DIR_NONE;
2931 fsg->lun = cbw->Lun;
2932 fsg->tag = cbw->Tag;
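received_cbw() (file line 2878) accepts a command only when the Bulk-Only Command Block Wrapper is exactly 31 bytes with the correct signature, then copies the CDB and derives the data direction from the Flags byte and DataTransferLength. A sketch of that validation against the wrapper layout in the Bulk-Only Transport specification; the struct and constants follow the spec, the endpoint-halting error paths are omitted, and the driver additionally range-checks the LUN and flag bits against its own limits:

#include <stdint.h>
#include <string.h>

#define USB_BULK_CB_WRAP_LEN	31
#define USB_BULK_CB_SIG		0x43425355	/* "USBC", little-endian */
#define USB_BULK_IN_FLAG	0x80		/* data flows device-to-host */

struct bulk_cb_wrap {
	uint32_t	Signature;
	uint32_t	Tag;
	uint32_t	DataTransferLength;
	uint8_t		Flags;
	uint8_t		Lun;
	uint8_t		Length;			/* CDB length, 1..16 */
	uint8_t		CDB[16];
} __attribute__((packed));			/* 31 bytes on the wire */

enum data_dir { DATA_DIR_NONE, DATA_DIR_TO_HOST, DATA_DIR_FROM_HOST };

static int parse_cbw(const struct bulk_cb_wrap *cbw, unsigned int actual,
		     uint8_t *cmnd, unsigned int *cmnd_size,
		     enum data_dir *dir, uint32_t *data_size)
{
	if (actual != USB_BULK_CB_WRAP_LEN ||
	    cbw->Signature != USB_BULK_CB_SIG)	/* little-endian host assumed */
		return -1;			/* invalid CBW */
	if (cbw->Length == 0 || cbw->Length > 16)
		return -1;			/* not meaningful */

	*cmnd_size = cbw->Length;
	memcpy(cmnd, cbw->CDB, *cmnd_size);
	*dir = (cbw->Flags & USB_BULK_IN_FLAG) ?
			DATA_DIR_TO_HOST : DATA_DIR_FROM_HOST;
	*data_size = cbw->DataTransferLength;	/* le32_to_cpu() in the driver */
	if (*data_size == 0)
		*dir = DATA_DIR_NONE;
	return 0;
}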
2937 static int get_next_command(struct fsg_dev *fsg)
2945 bh = fsg->next_buffhd_to_fill;
2947 if ((rc = sleep_thread(fsg)) != 0)
2952 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2954 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2963 if ((rc = sleep_thread(fsg)) != 0)
2967 rc = received_cbw(fsg, bh);
2973 while (fsg->cbbuf_cmnd_size == 0) {
2974 if ((rc = sleep_thread(fsg)) != 0)
2981 if (fsg->intreq_busy)
2982 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2985 fsg->data_dir = DATA_DIR_UNKNOWN;
2986 spin_lock_irq(&fsg->lock);
2987 fsg->cmnd_size = fsg->cbbuf_cmnd_size;
2988 memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
2989 fsg->cbbuf_cmnd_size = 0;
2990 spin_unlock_irq(&fsg->lock);
2998 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
3003 ep->driver_data = fsg;
3006 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
3010 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
3016 ERROR(fsg, "can't allocate request for %s\n", ep->name);
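enable_endpoint() and alloc_request() (file lines 2998 and 3010) are thin wrappers: one claims the endpoint and enables it with the chosen descriptor, the other allocates a usb_request and logs on failure. A sketch assuming the 2.6-era gadget API, where usb_ep_enable() still took the descriptor as a second argument; struct fsg_dev and the ERROR() macro are the driver's own, and the GFP flag is an assumption:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/usb/gadget.h>

static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
		const struct usb_endpoint_descriptor *d)
{
	int rc;

	ep->driver_data = fsg;		/* claim the endpoint */
	rc = usb_ep_enable(ep, d);	/* 2.6-era two-argument form */
	if (rc)
		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
	return rc;
}

static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
		struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);	/* flag assumed */
	if (*preq)
		return 0;
	ERROR(fsg, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}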
3025 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
3031 if (fsg->running)
3032 DBG(fsg, "reset interface\n");
3037 struct fsg_buffhd *bh = &fsg->buffhds[i];
3040 usb_ep_free_request(fsg->bulk_in, bh->inreq);
3044 usb_ep_free_request(fsg->bulk_out, bh->outreq);
3048 if (fsg->intreq) {
3049 usb_ep_free_request(fsg->intr_in, fsg->intreq);
3050 fsg->intreq = NULL;
3054 if (fsg->bulk_in_enabled) {
3055 usb_ep_disable(fsg->bulk_in);
3056 fsg->bulk_in_enabled = 0;
3058 if (fsg->bulk_out_enabled) {
3059 usb_ep_disable(fsg->bulk_out);
3060 fsg->bulk_out_enabled = 0;
3062 if (fsg->intr_in_enabled) {
3063 usb_ep_disable(fsg->intr_in);
3064 fsg->intr_in_enabled = 0;
3067 fsg->running = 0;
3071 DBG(fsg, "set interface %d\n", altsetting);
3074 d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
3075 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
3077 fsg->bulk_in_enabled = 1;
3079 d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
3080 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
3082 fsg->bulk_out_enabled = 1;
3083 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
3086 d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
3087 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
3089 fsg->intr_in_enabled = 1;
3094 struct fsg_buffhd *bh = &fsg->buffhds[i];
3096 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
3098 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
3107 if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
3109 fsg->intreq->complete = intr_in_complete;
3112 fsg->running = 1;
3113 for (i = 0; i < fsg->nluns; ++i)
3114 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3127 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
3132 if (fsg->config != 0) {
3133 DBG(fsg, "reset config\n");
3134 fsg->config = 0;
3135 rc = do_set_interface(fsg, -1);
3140 fsg->config = new_config;
3141 if ((rc = do_set_interface(fsg, 0)) != 0)
3142 fsg->config = 0; // Reset on errors
3146 switch (fsg->gadget->speed) {
3152 INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
3161 static void handle_exception(struct fsg_dev *fsg)
3181 if (fsg->state < FSG_STATE_EXIT)
3182 DBG(fsg, "Main thread exiting on signal\n");
3183 raise_exception(fsg, FSG_STATE_EXIT);
3188 if (fsg->intreq_busy)
3189 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
3191 bh = &fsg->buffhds[i];
3193 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
3195 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
3200 num_active = fsg->intreq_busy;
3202 bh = &fsg->buffhds[i];
3207 if (sleep_thread(fsg))
3212 if (fsg->bulk_in_enabled)
3213 usb_ep_fifo_flush(fsg->bulk_in);
3214 if (fsg->bulk_out_enabled)
3215 usb_ep_fifo_flush(fsg->bulk_out);
3216 if (fsg->intr_in_enabled)
3217 usb_ep_fifo_flush(fsg->intr_in);
3221 spin_lock_irq(&fsg->lock);
3224 bh = &fsg->buffhds[i];
3227 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
3228 &fsg->buffhds[0];
3230 exception_req_tag = fsg->exception_req_tag;
3231 new_config = fsg->new_config;
3232 old_state = fsg->state;
3235 fsg->state = FSG_STATE_STATUS_PHASE;
3237 for (i = 0; i < fsg->nluns; ++i) {
3238 curlun = &fsg->luns[i];
3245 fsg->state = FSG_STATE_IDLE;
3247 spin_unlock_irq(&fsg->lock);
3255 send_status(fsg);
3256 spin_lock_irq(&fsg->lock);
3257 if (fsg->state == FSG_STATE_STATUS_PHASE)
3258 fsg->state = FSG_STATE_IDLE;
3259 spin_unlock_irq(&fsg->lock);
3267 &fsg->atomic_bitflags)) {
3268 usb_ep_clear_halt(fsg->bulk_in);
3269 usb_ep_clear_halt(fsg->bulk_out);
3273 if (fsg->ep0_req_tag == exception_req_tag)
3274 ep0_queue(fsg); // Complete the status stage
3277 send_status(fsg); // Status by interrupt pipe
3282 // for (i = 0; i < fsg->nluns; ++i)
3283 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3287 rc = do_set_interface(fsg, 0);
3288 if (fsg->ep0_req_tag != exception_req_tag)
3291 fsg_set_halt(fsg, fsg->ep0);
3293 ep0_queue(fsg);
3297 rc = do_set_config(fsg, new_config);
3298 if (fsg->ep0_req_tag != exception_req_tag)
3301 fsg_set_halt(fsg, fsg->ep0);
3303 ep0_queue(fsg);
3307 fsync_all(fsg);
3308 do_set_config(fsg, 0); // Unconfigured state
3313 do_set_config(fsg, 0); // Free resources
3314 spin_lock_irq(&fsg->lock);
3315 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
3316 spin_unlock_irq(&fsg->lock);
3326 struct fsg_dev *fsg = fsg_;
3341 while (fsg->state != FSG_STATE_TERMINATED) {
3342 if (exception_in_progress(fsg) || signal_pending(current)) {
3343 handle_exception(fsg);
3347 if (!fsg->running) {
3348 sleep_thread(fsg);
3352 if (get_next_command(fsg))
3355 spin_lock_irq(&fsg->lock);
3356 if (!exception_in_progress(fsg))
3357 fsg->state = FSG_STATE_DATA_PHASE;
3358 spin_unlock_irq(&fsg->lock);
3360 if (do_scsi_command(fsg) || finish_reply(fsg))
3363 spin_lock_irq(&fsg->lock);
3364 if (!exception_in_progress(fsg))
3365 fsg->state = FSG_STATE_STATUS_PHASE;
3366 spin_unlock_irq(&fsg->lock);
3368 if (send_status(fsg))
3371 spin_lock_irq(&fsg->lock);
3372 if (!exception_in_progress(fsg))
3373 fsg->state = FSG_STATE_IDLE;
3374 spin_unlock_irq(&fsg->lock);
3377 spin_lock_irq(&fsg->lock);
3378 fsg->thread_task = NULL;
3379 spin_unlock_irq(&fsg->lock);
3383 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) {
3385 close_all_backing_files(fsg);
3389 complete_and_exit(&fsg->thread_notifier, 0);
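fsg_main_thread() (its body is indexed above starting at file line 3326) is a plain command loop: wait for a command, run the data phase, send status, with every state change taken under the spinlock and re-checked against a pending exception. A condensed skeleton keeping only the transitions visible in the listing; the real thread ends by clearing thread_task and exiting through complete_and_exit() (file lines 3377-3389):

/* Condensed skeleton of the command loop; every call and state change
 * here appears in the lines indexed above. */
static int fsg_main_thread(void *fsg_)
{
	struct fsg_dev *fsg = fsg_;

	while (fsg->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(fsg) || signal_pending(current)) {
			handle_exception(fsg);
			continue;
		}

		if (!fsg->running) {
			sleep_thread(fsg);
			continue;
		}

		if (get_next_command(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (do_scsi_command(fsg) || finish_reply(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (send_status(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
	}

	/* The real thread then clears fsg->thread_task under the lock and
	 * exits via complete_and_exit() (file lines 3377-3389). */
	return 0;
}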
3396 * the caller must own fsg->filesem for writing. */
3479 static void close_all_backing_files(struct fsg_dev *fsg)
3483 for (i = 0; i < fsg->nluns; ++i)
3484 close_backing_file(&fsg->luns[i]);
3498 struct fsg_dev *fsg = dev_get_drvdata(dev);
3502 down_read(&fsg->filesem);
3518 up_read(&fsg->filesem);
3527 struct fsg_dev *fsg = dev_get_drvdata(dev);
3535 down_read(&fsg->filesem);
3543 up_read(&fsg->filesem);
3550 struct fsg_dev *fsg = dev_get_drvdata(dev);
3563 down_write(&fsg->filesem);
3576 up_write(&fsg->filesem);
3590 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
3592 kfree(fsg->luns);
3593 kfree(fsg);
3598 struct fsg_dev *fsg = dev_get_drvdata(dev);
3600 kref_put(&fsg->ref, fsg_release);
3605 struct fsg_dev *fsg = get_gadget_data(gadget);
3608 struct usb_request *req = fsg->ep0req;
3610 DBG(fsg, "unbind\n");
3611 clear_bit(REGISTERED, &fsg->atomic_bitflags);
3614 for (i = 0; i < fsg->nluns; ++i) {
3615 curlun = &fsg->luns[i];
3625 if (fsg->state != FSG_STATE_TERMINATED) {
3626 raise_exception(fsg, FSG_STATE_EXIT);
3627 wait_for_completion(&fsg->thread_notifier);
3630 complete(&fsg->thread_notifier);
3635 struct fsg_buffhd *bh = &fsg->buffhds[i];
3638 usb_ep_free_buffer(fsg->bulk_in, bh->buf, bh->dma,
3645 usb_ep_free_buffer(fsg->ep0, req->buf,
3647 usb_ep_free_request(fsg->ep0, req);
3654 static int __init check_parameters(struct fsg_dev *fsg)
3665 if (gadget_is_sh(fsg->gadget))
3670 if (gadget_is_sa1100(fsg->gadget))
3673 gcnum = usb_gadget_controller_number(fsg->gadget);
3677 WARN(fsg, "controller '%s' not recognized\n",
3678 fsg->gadget->name);
3695 ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
3724 ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
3730 ERROR(fsg, "invalid buflen\n");
3741 struct fsg_dev *fsg = the_fsg;
3749 fsg->gadget = gadget;
3750 set_gadget_data(gadget, fsg);
3751 fsg->ep0 = gadget->ep0;
3752 fsg->ep0->driver_data = fsg;
3754 if ((rc = check_parameters(fsg)) != 0)
3768 ERROR(fsg, "invalid number of LUNs: %d\n", i);
3775 fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
3776 if (!fsg->luns) {
3780 fsg->nluns = i;
3782 for (i = 0; i < fsg->nluns; ++i) {
3783 curlun = &fsg->luns[i];
3788 dev_set_drvdata(&curlun->dev, fsg);
3793 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3804 kref_get(&fsg->ref);
3811 ERROR(fsg, "no file given for LUN%d\n", i);
3822 ep->driver_data = fsg; // claim the endpoint
3823 fsg->bulk_in = ep;
3828 ep->driver_data = fsg; // claim the endpoint
3829 fsg->bulk_out = ep;
3835 ep->driver_data = fsg; // claim the endpoint
3836 fsg->intr_in = ep;
3840 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
3855 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
3871 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
3874 req->buf = usb_ep_alloc_buffer(fsg->ep0, EP0_BUFSIZE,
3882 struct fsg_buffhd *bh = &fsg->buffhds[i];
3887 bh->buf = usb_ep_alloc_buffer(fsg->bulk_in, mod_data.buflen,
3893 fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
3912 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3914 if (IS_ERR(fsg->thread_task)) {
3915 rc = PTR_ERR(fsg->thread_task);
3919 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
3920 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
3923 for (i = 0; i < fsg->nluns; ++i) {
3924 curlun = &fsg->luns[i];
3940 DBG(fsg, "transport=%s (x%02x)\n",
3942 DBG(fsg, "protocol=%s (x%02x)\n",
3944 DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
3946 DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
3949 DBG(fsg, "I/O thread pid: %d\n", fsg->thread_task->pid);
3951 set_bit(REGISTERED, &fsg->atomic_bitflags);
3954 wake_up_process(fsg->thread_task);
3958 ERROR(fsg, "unable to autoconfigure all endpoints\n");
3962 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
3964 close_all_backing_files(fsg);
3973 struct fsg_dev *fsg = get_gadget_data(gadget);
3975 DBG(fsg, "suspend\n");
3976 set_bit(SUSPENDED, &fsg->atomic_bitflags);
3981 struct fsg_dev *fsg = get_gadget_data(gadget);
3983 DBG(fsg, "resume\n");
3984 clear_bit(SUSPENDED, &fsg->atomic_bitflags);
4016 struct fsg_dev *fsg;
4018 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
4019 if (!fsg)
4021 spin_lock_init(&fsg->lock);
4022 init_rwsem(&fsg->filesem);
4023 kref_init(&fsg->ref);
4024 init_completion(&fsg->thread_notifier);
4026 the_fsg = fsg;
4034 struct fsg_dev *fsg;
4038 fsg = the_fsg;
4040 kref_put(&fsg->ref, fsg_release);
4048 struct fsg_dev *fsg = the_fsg;
4051 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
4055 wait_for_completion(&fsg->thread_notifier);
4057 close_all_backing_files(fsg);
4058 kref_put(&fsg->ref, fsg_release);