1/* 2 * file_storage.c -- File-backed USB Storage Gadget, for USB development 3 * 4 * Copyright (C) 2003-2008 Alan Stern 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. The names of the above-listed copyright holders may not be used 17 * to endorse or promote products derived from this software without 18 * specific prior written permission. 19 * 20 * ALTERNATIVELY, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") as published by the Free Software 22 * Foundation, either version 2 of that License or (at your option) any 23 * later version. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 39 40 41/* 42 * Driver Design 43 * 44 * The FSG driver is fairly straightforward. There is a main kernel 45 * thread that handles most of the work. Interrupt routines field 46 * callbacks from the controller driver: bulk- and interrupt-request 47 * completion notifications, endpoint-0 events, and disconnect events. 48 * Completion events are passed to the main thread by wakeup calls. Many 49 * ep0 requests are handled at interrupt time, but SetInterface, 50 * SetConfiguration, and device reset requests are forwarded to the 51 * thread in the form of "exceptions" using SIGUSR1 signals (since they 52 * should interrupt any ongoing file I/O operations). 53 * 54 * The thread's main routine implements the standard command/data/status 55 * parts of a SCSI interaction. It and its subroutines are full of tests 56 * for pending signals/exceptions -- all this polling is necessary since 57 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an 58 * indication that the driver really wants to be running in userspace.) 59 * An important point is that so long as the thread is alive it keeps an 60 * open reference to the backing file. This will prevent unmounting 61 * the backing file's underlying filesystem and could cause problems 62 * during system shutdown, for example. To prevent such problems, the 63 * thread catches INT, TERM, and KILL signals and converts them into 64 * an EXIT exception. 65 * 66 * In normal operation the main thread is started during the gadget's 67 * fsg_bind() callback and stopped during fsg_unbind(). But it can also 68 * exit when it receives a signal, and there's no point leaving the 69 * gadget running when the thread is dead. So just before the thread 70 * exits, it deregisters the gadget driver. This makes things a little 71 * tricky: The driver is deregistered at two places, and the exiting 72 * thread can indirectly call fsg_unbind() which in turn can tell the 73 * thread to exit. The first problem is resolved through the use of the 74 * REGISTERED atomic bitflag; the driver will only be deregistered once. 75 * The second problem is resolved by having fsg_unbind() check 76 * fsg->state; it won't try to stop the thread if the state is already 77 * FSG_STATE_TERMINATED. 78 * 79 * To provide maximum throughput, the driver uses a circular pipeline of 80 * buffer heads (struct fsg_buffhd). In principle the pipeline can be 81 * arbitrarily long; in practice the benefits don't justify having more 82 * than 2 stages (i.e., double buffering). But it helps to think of the 83 * pipeline as being a long one. Each buffer head contains a bulk-in and 84 * a bulk-out request pointer (since the buffer can be used for both 85 * output and input -- directions always are given from the host's 86 * point of view) as well as a pointer to the buffer and various state 87 * variables. 88 * 89 * Use of the pipeline follows a simple protocol. There is a variable 90 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 91 * At any time that buffer head may still be in use from an earlier 92 * request, so each buffer head has a state variable indicating whether 93 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the 94 * buffer head to be EMPTY, filling the buffer either by file I/O or by 95 * USB I/O (during which the buffer head is BUSY), and marking the buffer 96 * head FULL when the I/O is complete. Then the buffer will be emptied 97 * (again possibly by USB I/O, during which it is marked BUSY) and 98 * finally marked EMPTY again (possibly by a completion routine). 99 * 100 * A module parameter tells the driver to avoid stalling the bulk 101 * endpoints wherever the transport specification allows. This is 102 * necessary for some UDCs like the SuperH, which cannot reliably clear a 103 * halt on a bulk endpoint. However, under certain circumstances the 104 * Bulk-only specification requires a stall. In such cases the driver 105 * will halt the endpoint and set a flag indicating that it should clear 106 * the halt in software during the next device reset. Hopefully this 107 * will permit everything to work correctly. Furthermore, although the 108 * specification allows the bulk-out endpoint to halt when the host sends 109 * too much data, implementing this would cause an unavoidable race. 110 * The driver will always use the "no-stall" approach for OUT transfers. 111 * 112 * One subtle point concerns sending status-stage responses for ep0 113 * requests. Some of these requests, such as device reset, can involve 114 * interrupting an ongoing file I/O operation, which might take an 115 * arbitrarily long time. During that delay the host might give up on 116 * the original ep0 request and issue a new one. When that happens the 117 * driver should not notify the host about completion of the original 118 * request, as the host will no longer be waiting for it. So the driver 119 * assigns to each ep0 request a unique tag, and it keeps track of the 120 * tag value of the request associated with a long-running exception 121 * (device-reset, interface-change, or configuration-change). When the 122 * exception handler is finished, the status-stage response is submitted 123 * only if the current ep0 request tag is equal to the exception request 124 * tag. Thus only the most recently received ep0 request will get a 125 * status-stage response. 126 * 127 * Warning: This driver source file is too long. It ought to be split up 128 * into a header file plus about 3 separate .c files, to handle the details 129 * of the Gadget, USB Mass Storage, and SCSI protocols. 130 */ 131 132 133/* #define VERBOSE_DEBUG */ 134/* #define DUMP_MSGS */ 135 136 137#include <linux/blkdev.h> 138#include <linux/completion.h> 139#include <linux/dcache.h> 140#include <linux/delay.h> 141#include <linux/device.h> 142#include <linux/fcntl.h> 143#include <linux/file.h> 144#include <linux/fs.h> 145#include <linux/kref.h> 146#include <linux/kthread.h> 147#include <linux/limits.h> 148#include <linux/rwsem.h> 149#include <linux/slab.h> 150#include <linux/spinlock.h> 151#include <linux/string.h> 152#include <linux/freezer.h> 153#include <linux/utsname.h> 154 155#include <linux/usb/ch9.h> 156#include <linux/usb/gadget.h> 157 158#include "gadget_chips.h" 159 160 161 162/* 163 * Kbuild is not very cooperative with respect to linking separately 164 * compiled library objects into one module. So for now we won't use 165 * separate compilation ... ensuring init/exit sections work to shrink 166 * the runtime footprint, and giving us at least some parts of what 167 * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 168 */ 169#include "usbstring.c" 170#include "config.c" 171#include "epautoconf.c" 172 173/*-------------------------------------------------------------------------*/ 174 175#define DRIVER_DESC "File-backed Storage Gadget" 176#define DRIVER_NAME "g_file_storage" 177/* DRIVER_VERSION must be at least 6 characters long, as it is used 178 * to generate a fallback serial number. */ 179#define DRIVER_VERSION "20 November 2008" 180 181static char fsg_string_manufacturer[64]; 182static const char fsg_string_product[] = DRIVER_DESC; 183static char fsg_string_serial[13]; 184static const char fsg_string_config[] = "Self-powered"; 185static const char fsg_string_interface[] = "Mass Storage"; 186 187 188#include "storage_common.c" 189 190 191MODULE_DESCRIPTION(DRIVER_DESC); 192MODULE_AUTHOR("Alan Stern"); 193MODULE_LICENSE("Dual BSD/GPL"); 194 195/* 196 * This driver assumes self-powered hardware and has no way for users to 197 * trigger remote wakeup. It uses autoconfiguration to select endpoints 198 * and endpoint addresses. 199 */ 200 201 202/*-------------------------------------------------------------------------*/ 203 204 205/* Encapsulate the module parameter settings */ 206 207static struct { 208 char *file[FSG_MAX_LUNS]; 209 int ro[FSG_MAX_LUNS]; 210 int nofua[FSG_MAX_LUNS]; 211 unsigned int num_filenames; 212 unsigned int num_ros; 213 unsigned int num_nofuas; 214 unsigned int nluns; 215 216 int removable; 217 int can_stall; 218 int cdrom; 219 220 char *transport_parm; 221 char *protocol_parm; 222 unsigned short vendor; 223 unsigned short product; 224 unsigned short release; 225 char *serial; 226 unsigned int buflen; 227 228 int transport_type; 229 char *transport_name; 230 int protocol_type; 231 char *protocol_name; 232 233} mod_data = { // Default values 234 .transport_parm = "BBB", 235 .protocol_parm = "SCSI", 236 .removable = 0, 237 .can_stall = 1, 238 .cdrom = 0, 239 .vendor = FSG_VENDOR_ID, 240 .product = FSG_PRODUCT_ID, 241 .release = 0xffff, // Use controller chip type 242 .buflen = 16384, 243 }; 244 245 246module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames, 247 S_IRUGO); 248MODULE_PARM_DESC(file, "names of backing files or devices"); 249 250module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO); 251MODULE_PARM_DESC(ro, "true to force read-only"); 252 253module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas, 254 S_IRUGO); 255MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit"); 256 257module_param_named(luns, mod_data.nluns, uint, S_IRUGO); 258MODULE_PARM_DESC(luns, "number of LUNs"); 259 260module_param_named(removable, mod_data.removable, bool, S_IRUGO); 261MODULE_PARM_DESC(removable, "true to simulate removable media"); 262 263module_param_named(stall, mod_data.can_stall, bool, S_IRUGO); 264MODULE_PARM_DESC(stall, "false to prevent bulk stalls"); 265 266module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO); 267MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk"); 268 269module_param_named(serial, mod_data.serial, charp, S_IRUGO); 270MODULE_PARM_DESC(serial, "USB serial number"); 271 272/* In the non-TEST version, only the module parameters listed above 273 * are available. */ 274#ifdef CONFIG_USB_FILE_STORAGE_TEST 275 276module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO); 277MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)"); 278 279module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO); 280MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, " 281 "8070, or SCSI)"); 282 283module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO); 284MODULE_PARM_DESC(vendor, "USB Vendor ID"); 285 286module_param_named(product, mod_data.product, ushort, S_IRUGO); 287MODULE_PARM_DESC(product, "USB Product ID"); 288 289module_param_named(release, mod_data.release, ushort, S_IRUGO); 290MODULE_PARM_DESC(release, "USB release number"); 291 292module_param_named(buflen, mod_data.buflen, uint, S_IRUGO); 293MODULE_PARM_DESC(buflen, "I/O buffer size"); 294 295#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 296 297 298/* 299 * These definitions will permit the compiler to avoid generating code for 300 * parts of the driver that aren't used in the non-TEST version. Even gcc 301 * can recognize when a test of a constant expression yields a dead code 302 * path. 303 */ 304 305#ifdef CONFIG_USB_FILE_STORAGE_TEST 306 307#define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK) 308#define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI) 309#define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI) 310 311#else 312 313#define transport_is_bbb() 1 314#define transport_is_cbi() 0 315#define protocol_is_scsi() 1 316 317#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 318 319 320/*-------------------------------------------------------------------------*/ 321 322 323struct fsg_dev { 324 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */ 325 spinlock_t lock; 326 struct usb_gadget *gadget; 327 328 /* filesem protects: backing files in use */ 329 struct rw_semaphore filesem; 330 331 /* reference counting: wait until all LUNs are released */ 332 struct kref ref; 333 334 struct usb_ep *ep0; // Handy copy of gadget->ep0 335 struct usb_request *ep0req; // For control responses 336 unsigned int ep0_req_tag; 337 const char *ep0req_name; 338 339 struct usb_request *intreq; // For interrupt responses 340 int intreq_busy; 341 struct fsg_buffhd *intr_buffhd; 342 343 unsigned int bulk_out_maxpacket; 344 enum fsg_state state; // For exception handling 345 unsigned int exception_req_tag; 346 347 u8 config, new_config; 348 349 unsigned int running : 1; 350 unsigned int bulk_in_enabled : 1; 351 unsigned int bulk_out_enabled : 1; 352 unsigned int intr_in_enabled : 1; 353 unsigned int phase_error : 1; 354 unsigned int short_packet_received : 1; 355 unsigned int bad_lun_okay : 1; 356 357 unsigned long atomic_bitflags; 358#define REGISTERED 0 359#define IGNORE_BULK_OUT 1 360#define SUSPENDED 2 361 362 struct usb_ep *bulk_in; 363 struct usb_ep *bulk_out; 364 struct usb_ep *intr_in; 365 366 struct fsg_buffhd *next_buffhd_to_fill; 367 struct fsg_buffhd *next_buffhd_to_drain; 368 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS]; 369 370 int thread_wakeup_needed; 371 struct completion thread_notifier; 372 struct task_struct *thread_task; 373 374 int cmnd_size; 375 u8 cmnd[MAX_COMMAND_SIZE]; 376 enum data_direction data_dir; 377 u32 data_size; 378 u32 data_size_from_cmnd; 379 u32 tag; 380 unsigned int lun; 381 u32 residue; 382 u32 usb_amount_left; 383 384 /* The CB protocol offers no way for a host to know when a command 385 * has completed. As a result the next command may arrive early, 386 * and we will still have to handle it. For that reason we need 387 * a buffer to store new commands when using CB (or CBI, which 388 * does not oblige a host to wait for command completion either). */ 389 int cbbuf_cmnd_size; 390 u8 cbbuf_cmnd[MAX_COMMAND_SIZE]; 391 392 unsigned int nluns; 393 struct fsg_lun *luns; 394 struct fsg_lun *curlun; 395}; 396 397typedef void (*fsg_routine_t)(struct fsg_dev *); 398 399static int exception_in_progress(struct fsg_dev *fsg) 400{ 401 return (fsg->state > FSG_STATE_IDLE); 402} 403 404/* Make bulk-out requests be divisible by the maxpacket size */ 405static void set_bulk_out_req_length(struct fsg_dev *fsg, 406 struct fsg_buffhd *bh, unsigned int length) 407{ 408 unsigned int rem; 409 410 bh->bulk_out_intended_length = length; 411 rem = length % fsg->bulk_out_maxpacket; 412 if (rem > 0) 413 length += fsg->bulk_out_maxpacket - rem; 414 bh->outreq->length = length; 415} 416 417static struct fsg_dev *the_fsg; 418static struct usb_gadget_driver fsg_driver; 419 420 421/*-------------------------------------------------------------------------*/ 422 423static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 424{ 425 const char *name; 426 427 if (ep == fsg->bulk_in) 428 name = "bulk-in"; 429 else if (ep == fsg->bulk_out) 430 name = "bulk-out"; 431 else 432 name = ep->name; 433 DBG(fsg, "%s set halt\n", name); 434 return usb_ep_set_halt(ep); 435} 436 437 438/*-------------------------------------------------------------------------*/ 439 440/* 441 * DESCRIPTORS ... most are static, but strings and (full) configuration 442 * descriptors are built on demand. Also the (static) config and interface 443 * descriptors are adjusted during fsg_bind(). 444 */ 445 446/* There is only one configuration. */ 447#define CONFIG_VALUE 1 448 449static struct usb_device_descriptor 450device_desc = { 451 .bLength = sizeof device_desc, 452 .bDescriptorType = USB_DT_DEVICE, 453 454 .bcdUSB = cpu_to_le16(0x0200), 455 .bDeviceClass = USB_CLASS_PER_INTERFACE, 456 457 /* The next three values can be overridden by module parameters */ 458 .idVendor = cpu_to_le16(FSG_VENDOR_ID), 459 .idProduct = cpu_to_le16(FSG_PRODUCT_ID), 460 .bcdDevice = cpu_to_le16(0xffff), 461 462 .iManufacturer = FSG_STRING_MANUFACTURER, 463 .iProduct = FSG_STRING_PRODUCT, 464 .iSerialNumber = FSG_STRING_SERIAL, 465 .bNumConfigurations = 1, 466}; 467 468static struct usb_config_descriptor 469config_desc = { 470 .bLength = sizeof config_desc, 471 .bDescriptorType = USB_DT_CONFIG, 472 473 /* wTotalLength computed by usb_gadget_config_buf() */ 474 .bNumInterfaces = 1, 475 .bConfigurationValue = CONFIG_VALUE, 476 .iConfiguration = FSG_STRING_CONFIG, 477 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 478 .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2, 479}; 480 481 482static struct usb_qualifier_descriptor 483dev_qualifier = { 484 .bLength = sizeof dev_qualifier, 485 .bDescriptorType = USB_DT_DEVICE_QUALIFIER, 486 487 .bcdUSB = cpu_to_le16(0x0200), 488 .bDeviceClass = USB_CLASS_PER_INTERFACE, 489 490 .bNumConfigurations = 1, 491}; 492 493 494 495/* 496 * Config descriptors must agree with the code that sets configurations 497 * and with code managing interfaces and their altsettings. They must 498 * also handle different speeds and other-speed requests. 499 */ 500static int populate_config_buf(struct usb_gadget *gadget, 501 u8 *buf, u8 type, unsigned index) 502{ 503 enum usb_device_speed speed = gadget->speed; 504 int len; 505 const struct usb_descriptor_header **function; 506 507 if (index > 0) 508 return -EINVAL; 509 510 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG) 511 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed; 512 function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH 513 ? (const struct usb_descriptor_header **)fsg_hs_function 514 : (const struct usb_descriptor_header **)fsg_fs_function; 515 516 /* for now, don't advertise srp-only devices */ 517 if (!gadget_is_otg(gadget)) 518 function++; 519 520 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function); 521 ((struct usb_config_descriptor *) buf)->bDescriptorType = type; 522 return len; 523} 524 525 526/*-------------------------------------------------------------------------*/ 527 528/* These routines may be called in process context or in_irq */ 529 530/* Caller must hold fsg->lock */ 531static void wakeup_thread(struct fsg_dev *fsg) 532{ 533 /* Tell the main thread that something has happened */ 534 fsg->thread_wakeup_needed = 1; 535 if (fsg->thread_task) 536 wake_up_process(fsg->thread_task); 537} 538 539 540static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state) 541{ 542 unsigned long flags; 543 544 /* Do nothing if a higher-priority exception is already in progress. 545 * If a lower-or-equal priority exception is in progress, preempt it 546 * and notify the main thread by sending it a signal. */ 547 spin_lock_irqsave(&fsg->lock, flags); 548 if (fsg->state <= new_state) { 549 fsg->exception_req_tag = fsg->ep0_req_tag; 550 fsg->state = new_state; 551 if (fsg->thread_task) 552 send_sig_info(SIGUSR1, SEND_SIG_FORCED, 553 fsg->thread_task); 554 } 555 spin_unlock_irqrestore(&fsg->lock, flags); 556} 557 558 559/*-------------------------------------------------------------------------*/ 560 561/* The disconnect callback and ep0 routines. These always run in_irq, 562 * except that ep0_queue() is called in the main thread to acknowledge 563 * completion of various requests: set config, set interface, and 564 * Bulk-only device reset. */ 565 566static void fsg_disconnect(struct usb_gadget *gadget) 567{ 568 struct fsg_dev *fsg = get_gadget_data(gadget); 569 570 DBG(fsg, "disconnect or port reset\n"); 571 raise_exception(fsg, FSG_STATE_DISCONNECT); 572} 573 574 575static int ep0_queue(struct fsg_dev *fsg) 576{ 577 int rc; 578 579 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC); 580 if (rc != 0 && rc != -ESHUTDOWN) { 581 582 /* We can't do much more than wait for a reset */ 583 WARNING(fsg, "error in submission: %s --> %d\n", 584 fsg->ep0->name, rc); 585 } 586 return rc; 587} 588 589static void ep0_complete(struct usb_ep *ep, struct usb_request *req) 590{ 591 struct fsg_dev *fsg = ep->driver_data; 592 593 if (req->actual > 0) 594 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual); 595 if (req->status || req->actual != req->length) 596 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 597 req->status, req->actual, req->length); 598 if (req->status == -ECONNRESET) // Request was cancelled 599 usb_ep_fifo_flush(ep); 600 601 if (req->status == 0 && req->context) 602 ((fsg_routine_t) (req->context))(fsg); 603} 604 605 606/*-------------------------------------------------------------------------*/ 607 608/* Bulk and interrupt endpoint completion handlers. 609 * These always run in_irq. */ 610 611static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 612{ 613 struct fsg_dev *fsg = ep->driver_data; 614 struct fsg_buffhd *bh = req->context; 615 616 if (req->status || req->actual != req->length) 617 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 618 req->status, req->actual, req->length); 619 if (req->status == -ECONNRESET) // Request was cancelled 620 usb_ep_fifo_flush(ep); 621 622 /* Hold the lock while we update the request and buffer states */ 623 smp_wmb(); 624 spin_lock(&fsg->lock); 625 bh->inreq_busy = 0; 626 bh->state = BUF_STATE_EMPTY; 627 wakeup_thread(fsg); 628 spin_unlock(&fsg->lock); 629} 630 631static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 632{ 633 struct fsg_dev *fsg = ep->driver_data; 634 struct fsg_buffhd *bh = req->context; 635 636 dump_msg(fsg, "bulk-out", req->buf, req->actual); 637 if (req->status || req->actual != bh->bulk_out_intended_length) 638 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 639 req->status, req->actual, 640 bh->bulk_out_intended_length); 641 if (req->status == -ECONNRESET) // Request was cancelled 642 usb_ep_fifo_flush(ep); 643 644 /* Hold the lock while we update the request and buffer states */ 645 smp_wmb(); 646 spin_lock(&fsg->lock); 647 bh->outreq_busy = 0; 648 bh->state = BUF_STATE_FULL; 649 wakeup_thread(fsg); 650 spin_unlock(&fsg->lock); 651} 652 653 654#ifdef CONFIG_USB_FILE_STORAGE_TEST 655static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 656{ 657 struct fsg_dev *fsg = ep->driver_data; 658 struct fsg_buffhd *bh = req->context; 659 660 if (req->status || req->actual != req->length) 661 DBG(fsg, "%s --> %d, %u/%u\n", __func__, 662 req->status, req->actual, req->length); 663 if (req->status == -ECONNRESET) // Request was cancelled 664 usb_ep_fifo_flush(ep); 665 666 /* Hold the lock while we update the request and buffer states */ 667 smp_wmb(); 668 spin_lock(&fsg->lock); 669 fsg->intreq_busy = 0; 670 bh->state = BUF_STATE_EMPTY; 671 wakeup_thread(fsg); 672 spin_unlock(&fsg->lock); 673} 674 675#else 676static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 677{} 678#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 679 680 681/*-------------------------------------------------------------------------*/ 682 683/* Ep0 class-specific handlers. These always run in_irq. */ 684 685#ifdef CONFIG_USB_FILE_STORAGE_TEST 686static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 687{ 688 struct usb_request *req = fsg->ep0req; 689 static u8 cbi_reset_cmnd[6] = { 690 SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff}; 691 692 /* Error in command transfer? */ 693 if (req->status || req->length != req->actual || 694 req->actual < 6 || req->actual > MAX_COMMAND_SIZE) { 695 696 /* Not all controllers allow a protocol stall after 697 * receiving control-out data, but we'll try anyway. */ 698 fsg_set_halt(fsg, fsg->ep0); 699 return; // Wait for reset 700 } 701 702 /* Is it the special reset command? */ 703 if (req->actual >= sizeof cbi_reset_cmnd && 704 memcmp(req->buf, cbi_reset_cmnd, 705 sizeof cbi_reset_cmnd) == 0) { 706 707 /* Raise an exception to stop the current operation 708 * and reinitialize our state. */ 709 DBG(fsg, "cbi reset request\n"); 710 raise_exception(fsg, FSG_STATE_RESET); 711 return; 712 } 713 714 VDBG(fsg, "CB[I] accept device-specific command\n"); 715 spin_lock(&fsg->lock); 716 717 /* Save the command for later */ 718 if (fsg->cbbuf_cmnd_size) 719 WARNING(fsg, "CB[I] overwriting previous command\n"); 720 fsg->cbbuf_cmnd_size = req->actual; 721 memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size); 722 723 wakeup_thread(fsg); 724 spin_unlock(&fsg->lock); 725} 726 727#else 728static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 729{} 730#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 731 732 733static int class_setup_req(struct fsg_dev *fsg, 734 const struct usb_ctrlrequest *ctrl) 735{ 736 struct usb_request *req = fsg->ep0req; 737 int value = -EOPNOTSUPP; 738 u16 w_index = le16_to_cpu(ctrl->wIndex); 739 u16 w_value = le16_to_cpu(ctrl->wValue); 740 u16 w_length = le16_to_cpu(ctrl->wLength); 741 742 if (!fsg->config) 743 return value; 744 745 /* Handle Bulk-only class-specific requests */ 746 if (transport_is_bbb()) { 747 switch (ctrl->bRequest) { 748 749 case USB_BULK_RESET_REQUEST: 750 if (ctrl->bRequestType != (USB_DIR_OUT | 751 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 752 break; 753 if (w_index != 0 || w_value != 0) { 754 value = -EDOM; 755 break; 756 } 757 758 /* Raise an exception to stop the current operation 759 * and reinitialize our state. */ 760 DBG(fsg, "bulk reset request\n"); 761 raise_exception(fsg, FSG_STATE_RESET); 762 value = DELAYED_STATUS; 763 break; 764 765 case USB_BULK_GET_MAX_LUN_REQUEST: 766 if (ctrl->bRequestType != (USB_DIR_IN | 767 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 768 break; 769 if (w_index != 0 || w_value != 0) { 770 value = -EDOM; 771 break; 772 } 773 VDBG(fsg, "get max LUN\n"); 774 *(u8 *) req->buf = fsg->nluns - 1; 775 value = 1; 776 break; 777 } 778 } 779 780 /* Handle CBI class-specific requests */ 781 else { 782 switch (ctrl->bRequest) { 783 784 case USB_CBI_ADSC_REQUEST: 785 if (ctrl->bRequestType != (USB_DIR_OUT | 786 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 787 break; 788 if (w_index != 0 || w_value != 0) { 789 value = -EDOM; 790 break; 791 } 792 if (w_length > MAX_COMMAND_SIZE) { 793 value = -EOVERFLOW; 794 break; 795 } 796 value = w_length; 797 fsg->ep0req->context = received_cbi_adsc; 798 break; 799 } 800 } 801 802 if (value == -EOPNOTSUPP) 803 VDBG(fsg, 804 "unknown class-specific control req " 805 "%02x.%02x v%04x i%04x l%u\n", 806 ctrl->bRequestType, ctrl->bRequest, 807 le16_to_cpu(ctrl->wValue), w_index, w_length); 808 return value; 809} 810 811 812/*-------------------------------------------------------------------------*/ 813 814/* Ep0 standard request handlers. These always run in_irq. */ 815 816static int standard_setup_req(struct fsg_dev *fsg, 817 const struct usb_ctrlrequest *ctrl) 818{ 819 struct usb_request *req = fsg->ep0req; 820 int value = -EOPNOTSUPP; 821 u16 w_index = le16_to_cpu(ctrl->wIndex); 822 u16 w_value = le16_to_cpu(ctrl->wValue); 823 824 /* Usually this just stores reply data in the pre-allocated ep0 buffer, 825 * but config change events will also reconfigure hardware. */ 826 switch (ctrl->bRequest) { 827 828 case USB_REQ_GET_DESCRIPTOR: 829 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 830 USB_RECIP_DEVICE)) 831 break; 832 switch (w_value >> 8) { 833 834 case USB_DT_DEVICE: 835 VDBG(fsg, "get device descriptor\n"); 836 value = sizeof device_desc; 837 memcpy(req->buf, &device_desc, value); 838 break; 839 case USB_DT_DEVICE_QUALIFIER: 840 VDBG(fsg, "get device qualifier\n"); 841 if (!gadget_is_dualspeed(fsg->gadget)) 842 break; 843 value = sizeof dev_qualifier; 844 memcpy(req->buf, &dev_qualifier, value); 845 break; 846 847 case USB_DT_OTHER_SPEED_CONFIG: 848 VDBG(fsg, "get other-speed config descriptor\n"); 849 if (!gadget_is_dualspeed(fsg->gadget)) 850 break; 851 goto get_config; 852 case USB_DT_CONFIG: 853 VDBG(fsg, "get configuration descriptor\n"); 854get_config: 855 value = populate_config_buf(fsg->gadget, 856 req->buf, 857 w_value >> 8, 858 w_value & 0xff); 859 break; 860 861 case USB_DT_STRING: 862 VDBG(fsg, "get string descriptor\n"); 863 864 /* wIndex == language code */ 865 value = usb_gadget_get_string(&fsg_stringtab, 866 w_value & 0xff, req->buf); 867 break; 868 } 869 break; 870 871 /* One config, two speeds */ 872 case USB_REQ_SET_CONFIGURATION: 873 if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD | 874 USB_RECIP_DEVICE)) 875 break; 876 VDBG(fsg, "set configuration\n"); 877 if (w_value == CONFIG_VALUE || w_value == 0) { 878 fsg->new_config = w_value; 879 880 /* Raise an exception to wipe out previous transaction 881 * state (queued bufs, etc) and set the new config. */ 882 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE); 883 value = DELAYED_STATUS; 884 } 885 break; 886 case USB_REQ_GET_CONFIGURATION: 887 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 888 USB_RECIP_DEVICE)) 889 break; 890 VDBG(fsg, "get configuration\n"); 891 *(u8 *) req->buf = fsg->config; 892 value = 1; 893 break; 894 895 case USB_REQ_SET_INTERFACE: 896 if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD | 897 USB_RECIP_INTERFACE)) 898 break; 899 if (fsg->config && w_index == 0) { 900 901 /* Raise an exception to wipe out previous transaction 902 * state (queued bufs, etc) and install the new 903 * interface altsetting. */ 904 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE); 905 value = DELAYED_STATUS; 906 } 907 break; 908 case USB_REQ_GET_INTERFACE: 909 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD | 910 USB_RECIP_INTERFACE)) 911 break; 912 if (!fsg->config) 913 break; 914 if (w_index != 0) { 915 value = -EDOM; 916 break; 917 } 918 VDBG(fsg, "get interface\n"); 919 *(u8 *) req->buf = 0; 920 value = 1; 921 break; 922 923 default: 924 VDBG(fsg, 925 "unknown control req %02x.%02x v%04x i%04x l%u\n", 926 ctrl->bRequestType, ctrl->bRequest, 927 w_value, w_index, le16_to_cpu(ctrl->wLength)); 928 } 929 930 return value; 931} 932 933 934static int fsg_setup(struct usb_gadget *gadget, 935 const struct usb_ctrlrequest *ctrl) 936{ 937 struct fsg_dev *fsg = get_gadget_data(gadget); 938 int rc; 939 int w_length = le16_to_cpu(ctrl->wLength); 940 941 ++fsg->ep0_req_tag; // Record arrival of a new request 942 fsg->ep0req->context = NULL; 943 fsg->ep0req->length = 0; 944 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 945 946 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) 947 rc = class_setup_req(fsg, ctrl); 948 else 949 rc = standard_setup_req(fsg, ctrl); 950 951 /* Respond with data/status or defer until later? */ 952 if (rc >= 0 && rc != DELAYED_STATUS) { 953 rc = min(rc, w_length); 954 fsg->ep0req->length = rc; 955 fsg->ep0req->zero = rc < w_length; 956 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ? 957 "ep0-in" : "ep0-out"); 958 rc = ep0_queue(fsg); 959 } 960 961 /* Device either stalls (rc < 0) or reports success */ 962 return rc; 963} 964 965 966/*-------------------------------------------------------------------------*/ 967 968/* All the following routines run in process context */ 969 970 971/* Use this for bulk or interrupt transfers, not ep0 */ 972static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 973 struct usb_request *req, int *pbusy, 974 enum fsg_buffer_state *state) 975{ 976 int rc; 977 978 if (ep == fsg->bulk_in) 979 dump_msg(fsg, "bulk-in", req->buf, req->length); 980 else if (ep == fsg->intr_in) 981 dump_msg(fsg, "intr-in", req->buf, req->length); 982 983 spin_lock_irq(&fsg->lock); 984 *pbusy = 1; 985 *state = BUF_STATE_BUSY; 986 spin_unlock_irq(&fsg->lock); 987 rc = usb_ep_queue(ep, req, GFP_KERNEL); 988 if (rc != 0) { 989 *pbusy = 0; 990 *state = BUF_STATE_EMPTY; 991 992 /* We can't do much more than wait for a reset */ 993 994 /* Note: currently the net2280 driver fails zero-length 995 * submissions if DMA is enabled. */ 996 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && 997 req->length == 0)) 998 WARNING(fsg, "error in submission: %s --> %d\n", 999 ep->name, rc); 1000 } 1001} 1002 1003 1004static int sleep_thread(struct fsg_dev *fsg) 1005{ 1006 int rc = 0; 1007 1008 /* Wait until a signal arrives or we are woken up */ 1009 for (;;) { 1010 try_to_freeze(); 1011 set_current_state(TASK_INTERRUPTIBLE); 1012 if (signal_pending(current)) { 1013 rc = -EINTR; 1014 break; 1015 } 1016 if (fsg->thread_wakeup_needed) 1017 break; 1018 schedule(); 1019 } 1020 __set_current_state(TASK_RUNNING); 1021 fsg->thread_wakeup_needed = 0; 1022 return rc; 1023} 1024 1025 1026/*-------------------------------------------------------------------------*/ 1027 1028static int do_read(struct fsg_dev *fsg) 1029{ 1030 struct fsg_lun *curlun = fsg->curlun; 1031 u32 lba; 1032 struct fsg_buffhd *bh; 1033 int rc; 1034 u32 amount_left; 1035 loff_t file_offset, file_offset_tmp; 1036 unsigned int amount; 1037 unsigned int partial_page; 1038 ssize_t nread; 1039 1040 /* Get the starting Logical Block Address and check that it's 1041 * not too big */ 1042 if (fsg->cmnd[0] == SC_READ_6) 1043 lba = get_unaligned_be24(&fsg->cmnd[1]); 1044 else { 1045 lba = get_unaligned_be32(&fsg->cmnd[2]); 1046 1047 /* We allow DPO (Disable Page Out = don't save data in the 1048 * cache) and FUA (Force Unit Access = don't read from the 1049 * cache), but we don't implement them. */ 1050 if ((fsg->cmnd[1] & ~0x18) != 0) { 1051 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1052 return -EINVAL; 1053 } 1054 } 1055 if (lba >= curlun->num_sectors) { 1056 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1057 return -EINVAL; 1058 } 1059 file_offset = ((loff_t) lba) << 9; 1060 1061 /* Carry out the file reads */ 1062 amount_left = fsg->data_size_from_cmnd; 1063 if (unlikely(amount_left == 0)) 1064 return -EIO; // No default reply 1065 1066 for (;;) { 1067 1068 /* Figure out how much we need to read: 1069 * Try to read the remaining amount. 1070 * But don't read more than the buffer size. 1071 * And don't try to read past the end of the file. 1072 * Finally, if we're not at a page boundary, don't read past 1073 * the next page. 1074 * If this means reading 0 then we were asked to read past 1075 * the end of file. */ 1076 amount = min((unsigned int) amount_left, mod_data.buflen); 1077 amount = min((loff_t) amount, 1078 curlun->file_length - file_offset); 1079 partial_page = file_offset & (PAGE_CACHE_SIZE - 1); 1080 if (partial_page > 0) 1081 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE - 1082 partial_page); 1083 1084 /* Wait for the next buffer to become available */ 1085 bh = fsg->next_buffhd_to_fill; 1086 while (bh->state != BUF_STATE_EMPTY) { 1087 rc = sleep_thread(fsg); 1088 if (rc) 1089 return rc; 1090 } 1091 1092 /* If we were asked to read past the end of file, 1093 * end with an empty buffer. */ 1094 if (amount == 0) { 1095 curlun->sense_data = 1096 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1097 curlun->sense_data_info = file_offset >> 9; 1098 curlun->info_valid = 1; 1099 bh->inreq->length = 0; 1100 bh->state = BUF_STATE_FULL; 1101 break; 1102 } 1103 1104 /* Perform the read */ 1105 file_offset_tmp = file_offset; 1106 nread = vfs_read(curlun->filp, 1107 (char __user *) bh->buf, 1108 amount, &file_offset_tmp); 1109 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1110 (unsigned long long) file_offset, 1111 (int) nread); 1112 if (signal_pending(current)) 1113 return -EINTR; 1114 1115 if (nread < 0) { 1116 LDBG(curlun, "error in file read: %d\n", 1117 (int) nread); 1118 nread = 0; 1119 } else if (nread < amount) { 1120 LDBG(curlun, "partial file read: %d/%u\n", 1121 (int) nread, amount); 1122 nread -= (nread & 511); // Round down to a block 1123 } 1124 file_offset += nread; 1125 amount_left -= nread; 1126 fsg->residue -= nread; 1127 bh->inreq->length = nread; 1128 bh->state = BUF_STATE_FULL; 1129 1130 /* If an error occurred, report it and its position */ 1131 if (nread < amount) { 1132 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1133 curlun->sense_data_info = file_offset >> 9; 1134 curlun->info_valid = 1; 1135 break; 1136 } 1137 1138 if (amount_left == 0) 1139 break; // No more left to read 1140 1141 /* Send this buffer and go read some more */ 1142 bh->inreq->zero = 0; 1143 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1144 &bh->inreq_busy, &bh->state); 1145 fsg->next_buffhd_to_fill = bh->next; 1146 } 1147 1148 return -EIO; // No default reply 1149} 1150 1151 1152/*-------------------------------------------------------------------------*/ 1153 1154static int do_write(struct fsg_dev *fsg) 1155{ 1156 struct fsg_lun *curlun = fsg->curlun; 1157 u32 lba; 1158 struct fsg_buffhd *bh; 1159 int get_some_more; 1160 u32 amount_left_to_req, amount_left_to_write; 1161 loff_t usb_offset, file_offset, file_offset_tmp; 1162 unsigned int amount; 1163 unsigned int partial_page; 1164 ssize_t nwritten; 1165 int rc; 1166 1167 if (curlun->ro) { 1168 curlun->sense_data = SS_WRITE_PROTECTED; 1169 return -EINVAL; 1170 } 1171 spin_lock(&curlun->filp->f_lock); 1172 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait 1173 spin_unlock(&curlun->filp->f_lock); 1174 1175 /* Get the starting Logical Block Address and check that it's 1176 * not too big */ 1177 if (fsg->cmnd[0] == SC_WRITE_6) 1178 lba = get_unaligned_be24(&fsg->cmnd[1]); 1179 else { 1180 lba = get_unaligned_be32(&fsg->cmnd[2]); 1181 1182 /* We allow DPO (Disable Page Out = don't save data in the 1183 * cache) and FUA (Force Unit Access = write directly to the 1184 * medium). We don't implement DPO; we implement FUA by 1185 * performing synchronous output. */ 1186 if ((fsg->cmnd[1] & ~0x18) != 0) { 1187 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1188 return -EINVAL; 1189 } 1190 /* FUA */ 1191 if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) { 1192 spin_lock(&curlun->filp->f_lock); 1193 curlun->filp->f_flags |= O_DSYNC; 1194 spin_unlock(&curlun->filp->f_lock); 1195 } 1196 } 1197 if (lba >= curlun->num_sectors) { 1198 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1199 return -EINVAL; 1200 } 1201 1202 /* Carry out the file writes */ 1203 get_some_more = 1; 1204 file_offset = usb_offset = ((loff_t) lba) << 9; 1205 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd; 1206 1207 while (amount_left_to_write > 0) { 1208 1209 /* Queue a request for more data from the host */ 1210 bh = fsg->next_buffhd_to_fill; 1211 if (bh->state == BUF_STATE_EMPTY && get_some_more) { 1212 1213 /* Figure out how much we want to get: 1214 * Try to get the remaining amount. 1215 * But don't get more than the buffer size. 1216 * And don't try to go past the end of the file. 1217 * If we're not at a page boundary, 1218 * don't go past the next page. 1219 * If this means getting 0, then we were asked 1220 * to write past the end of file. 1221 * Finally, round down to a block boundary. */ 1222 amount = min(amount_left_to_req, mod_data.buflen); 1223 amount = min((loff_t) amount, curlun->file_length - 1224 usb_offset); 1225 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1); 1226 if (partial_page > 0) 1227 amount = min(amount, 1228 (unsigned int) PAGE_CACHE_SIZE - partial_page); 1229 1230 if (amount == 0) { 1231 get_some_more = 0; 1232 curlun->sense_data = 1233 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1234 curlun->sense_data_info = usb_offset >> 9; 1235 curlun->info_valid = 1; 1236 continue; 1237 } 1238 amount -= (amount & 511); 1239 if (amount == 0) { 1240 1241 /* Why were we were asked to transfer a 1242 * partial block? */ 1243 get_some_more = 0; 1244 continue; 1245 } 1246 1247 /* Get the next buffer */ 1248 usb_offset += amount; 1249 fsg->usb_amount_left -= amount; 1250 amount_left_to_req -= amount; 1251 if (amount_left_to_req == 0) 1252 get_some_more = 0; 1253 1254 /* amount is always divisible by 512, hence by 1255 * the bulk-out maxpacket size */ 1256 bh->outreq->length = bh->bulk_out_intended_length = 1257 amount; 1258 bh->outreq->short_not_ok = 1; 1259 start_transfer(fsg, fsg->bulk_out, bh->outreq, 1260 &bh->outreq_busy, &bh->state); 1261 fsg->next_buffhd_to_fill = bh->next; 1262 continue; 1263 } 1264 1265 /* Write the received data to the backing file */ 1266 bh = fsg->next_buffhd_to_drain; 1267 if (bh->state == BUF_STATE_EMPTY && !get_some_more) 1268 break; // We stopped early 1269 if (bh->state == BUF_STATE_FULL) { 1270 smp_rmb(); 1271 fsg->next_buffhd_to_drain = bh->next; 1272 bh->state = BUF_STATE_EMPTY; 1273 1274 /* Did something go wrong with the transfer? */ 1275 if (bh->outreq->status != 0) { 1276 curlun->sense_data = SS_COMMUNICATION_FAILURE; 1277 curlun->sense_data_info = file_offset >> 9; 1278 curlun->info_valid = 1; 1279 break; 1280 } 1281 1282 amount = bh->outreq->actual; 1283 if (curlun->file_length - file_offset < amount) { 1284 LERROR(curlun, 1285 "write %u @ %llu beyond end %llu\n", 1286 amount, (unsigned long long) file_offset, 1287 (unsigned long long) curlun->file_length); 1288 amount = curlun->file_length - file_offset; 1289 } 1290 1291 /* Perform the write */ 1292 file_offset_tmp = file_offset; 1293 nwritten = vfs_write(curlun->filp, 1294 (char __user *) bh->buf, 1295 amount, &file_offset_tmp); 1296 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 1297 (unsigned long long) file_offset, 1298 (int) nwritten); 1299 if (signal_pending(current)) 1300 return -EINTR; // Interrupted! 1301 1302 if (nwritten < 0) { 1303 LDBG(curlun, "error in file write: %d\n", 1304 (int) nwritten); 1305 nwritten = 0; 1306 } else if (nwritten < amount) { 1307 LDBG(curlun, "partial file write: %d/%u\n", 1308 (int) nwritten, amount); 1309 nwritten -= (nwritten & 511); 1310 // Round down to a block 1311 } 1312 file_offset += nwritten; 1313 amount_left_to_write -= nwritten; 1314 fsg->residue -= nwritten; 1315 1316 /* If an error occurred, report it and its position */ 1317 if (nwritten < amount) { 1318 curlun->sense_data = SS_WRITE_ERROR; 1319 curlun->sense_data_info = file_offset >> 9; 1320 curlun->info_valid = 1; 1321 break; 1322 } 1323 1324 /* Did the host decide to stop early? */ 1325 if (bh->outreq->actual != bh->outreq->length) { 1326 fsg->short_packet_received = 1; 1327 break; 1328 } 1329 continue; 1330 } 1331 1332 /* Wait for something to happen */ 1333 rc = sleep_thread(fsg); 1334 if (rc) 1335 return rc; 1336 } 1337 1338 return -EIO; // No default reply 1339} 1340 1341 1342/*-------------------------------------------------------------------------*/ 1343 1344static int do_synchronize_cache(struct fsg_dev *fsg) 1345{ 1346 struct fsg_lun *curlun = fsg->curlun; 1347 int rc; 1348 1349 /* We ignore the requested LBA and write out all file's 1350 * dirty data buffers. */ 1351 rc = fsg_lun_fsync_sub(curlun); 1352 if (rc) 1353 curlun->sense_data = SS_WRITE_ERROR; 1354 return 0; 1355} 1356 1357 1358/*-------------------------------------------------------------------------*/ 1359 1360static void invalidate_sub(struct fsg_lun *curlun) 1361{ 1362 struct file *filp = curlun->filp; 1363 struct inode *inode = filp->f_path.dentry->d_inode; 1364 unsigned long rc; 1365 1366 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1367 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); 1368} 1369 1370static int do_verify(struct fsg_dev *fsg) 1371{ 1372 struct fsg_lun *curlun = fsg->curlun; 1373 u32 lba; 1374 u32 verification_length; 1375 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 1376 loff_t file_offset, file_offset_tmp; 1377 u32 amount_left; 1378 unsigned int amount; 1379 ssize_t nread; 1380 1381 /* Get the starting Logical Block Address and check that it's 1382 * not too big */ 1383 lba = get_unaligned_be32(&fsg->cmnd[2]); 1384 if (lba >= curlun->num_sectors) { 1385 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1386 return -EINVAL; 1387 } 1388 1389 /* We allow DPO (Disable Page Out = don't save data in the 1390 * cache) but we don't implement it. */ 1391 if ((fsg->cmnd[1] & ~0x10) != 0) { 1392 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1393 return -EINVAL; 1394 } 1395 1396 verification_length = get_unaligned_be16(&fsg->cmnd[7]); 1397 if (unlikely(verification_length == 0)) 1398 return -EIO; // No default reply 1399 1400 /* Prepare to carry out the file verify */ 1401 amount_left = verification_length << 9; 1402 file_offset = ((loff_t) lba) << 9; 1403 1404 /* Write out all the dirty buffers before invalidating them */ 1405 fsg_lun_fsync_sub(curlun); 1406 if (signal_pending(current)) 1407 return -EINTR; 1408 1409 invalidate_sub(curlun); 1410 if (signal_pending(current)) 1411 return -EINTR; 1412 1413 /* Just try to read the requested blocks */ 1414 while (amount_left > 0) { 1415 1416 /* Figure out how much we need to read: 1417 * Try to read the remaining amount, but not more than 1418 * the buffer size. 1419 * And don't try to read past the end of the file. 1420 * If this means reading 0 then we were asked to read 1421 * past the end of file. */ 1422 amount = min((unsigned int) amount_left, mod_data.buflen); 1423 amount = min((loff_t) amount, 1424 curlun->file_length - file_offset); 1425 if (amount == 0) { 1426 curlun->sense_data = 1427 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1428 curlun->sense_data_info = file_offset >> 9; 1429 curlun->info_valid = 1; 1430 break; 1431 } 1432 1433 /* Perform the read */ 1434 file_offset_tmp = file_offset; 1435 nread = vfs_read(curlun->filp, 1436 (char __user *) bh->buf, 1437 amount, &file_offset_tmp); 1438 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1439 (unsigned long long) file_offset, 1440 (int) nread); 1441 if (signal_pending(current)) 1442 return -EINTR; 1443 1444 if (nread < 0) { 1445 LDBG(curlun, "error in file verify: %d\n", 1446 (int) nread); 1447 nread = 0; 1448 } else if (nread < amount) { 1449 LDBG(curlun, "partial file verify: %d/%u\n", 1450 (int) nread, amount); 1451 nread -= (nread & 511); // Round down to a sector 1452 } 1453 if (nread == 0) { 1454 curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1455 curlun->sense_data_info = file_offset >> 9; 1456 curlun->info_valid = 1; 1457 break; 1458 } 1459 file_offset += nread; 1460 amount_left -= nread; 1461 } 1462 return 0; 1463} 1464 1465 1466/*-------------------------------------------------------------------------*/ 1467 1468static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1469{ 1470 u8 *buf = (u8 *) bh->buf; 1471 1472 static char vendor_id[] = "Linux "; 1473 static char product_disk_id[] = "File-Stor Gadget"; 1474 static char product_cdrom_id[] = "File-CD Gadget "; 1475 1476 if (!fsg->curlun) { // Unsupported LUNs are okay 1477 fsg->bad_lun_okay = 1; 1478 memset(buf, 0, 36); 1479 buf[0] = 0x7f; // Unsupported, no device-type 1480 buf[4] = 31; // Additional length 1481 return 36; 1482 } 1483 1484 memset(buf, 0, 8); 1485 buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK); 1486 if (mod_data.removable) 1487 buf[1] = 0x80; 1488 buf[2] = 2; // ANSI SCSI level 2 1489 buf[3] = 2; // SCSI-2 INQUIRY data format 1490 buf[4] = 31; // Additional length 1491 // No special options 1492 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, 1493 (mod_data.cdrom ? product_cdrom_id : 1494 product_disk_id), 1495 mod_data.release); 1496 return 36; 1497} 1498 1499 1500static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1501{ 1502 struct fsg_lun *curlun = fsg->curlun; 1503 u8 *buf = (u8 *) bh->buf; 1504 u32 sd, sdinfo; 1505 int valid; 1506 1507 /* 1508 * From the SCSI-2 spec., section 7.9 (Unit attention condition): 1509 * 1510 * If a REQUEST SENSE command is received from an initiator 1511 * with a pending unit attention condition (before the target 1512 * generates the contingent allegiance condition), then the 1513 * target shall either: 1514 * a) report any pending sense data and preserve the unit 1515 * attention condition on the logical unit, or, 1516 * b) report the unit attention condition, may discard any 1517 * pending sense data, and clear the unit attention 1518 * condition on the logical unit for that initiator. 1519 * 1520 * FSG normally uses option a); enable this code to use option b). 1521 */ 1522 1523 if (!curlun) { // Unsupported LUNs are okay 1524 fsg->bad_lun_okay = 1; 1525 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1526 sdinfo = 0; 1527 valid = 0; 1528 } else { 1529 sd = curlun->sense_data; 1530 sdinfo = curlun->sense_data_info; 1531 valid = curlun->info_valid << 7; 1532 curlun->sense_data = SS_NO_SENSE; 1533 curlun->sense_data_info = 0; 1534 curlun->info_valid = 0; 1535 } 1536 1537 memset(buf, 0, 18); 1538 buf[0] = valid | 0x70; // Valid, current error 1539 buf[2] = SK(sd); 1540 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */ 1541 buf[7] = 18 - 8; // Additional sense length 1542 buf[12] = ASC(sd); 1543 buf[13] = ASCQ(sd); 1544 return 18; 1545} 1546 1547 1548static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1549{ 1550 struct fsg_lun *curlun = fsg->curlun; 1551 u32 lba = get_unaligned_be32(&fsg->cmnd[2]); 1552 int pmi = fsg->cmnd[8]; 1553 u8 *buf = (u8 *) bh->buf; 1554 1555 /* Check the PMI and LBA fields */ 1556 if (pmi > 1 || (pmi == 0 && lba != 0)) { 1557 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1558 return -EINVAL; 1559 } 1560 1561 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]); 1562 /* Max logical block */ 1563 put_unaligned_be32(512, &buf[4]); /* Block length */ 1564 return 8; 1565} 1566 1567 1568static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1569{ 1570 struct fsg_lun *curlun = fsg->curlun; 1571 int msf = fsg->cmnd[1] & 0x02; 1572 u32 lba = get_unaligned_be32(&fsg->cmnd[2]); 1573 u8 *buf = (u8 *) bh->buf; 1574 1575 if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */ 1576 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1577 return -EINVAL; 1578 } 1579 if (lba >= curlun->num_sectors) { 1580 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1581 return -EINVAL; 1582 } 1583 1584 memset(buf, 0, 8); 1585 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */ 1586 store_cdrom_address(&buf[4], msf, lba); 1587 return 8; 1588} 1589 1590 1591static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1592{ 1593 struct fsg_lun *curlun = fsg->curlun; 1594 int msf = fsg->cmnd[1] & 0x02; 1595 int start_track = fsg->cmnd[6]; 1596 u8 *buf = (u8 *) bh->buf; 1597 1598 if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ 1599 start_track > 1) { 1600 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1601 return -EINVAL; 1602 } 1603 1604 memset(buf, 0, 20); 1605 buf[1] = (20-2); /* TOC data length */ 1606 buf[2] = 1; /* First track number */ 1607 buf[3] = 1; /* Last track number */ 1608 buf[5] = 0x16; /* Data track, copying allowed */ 1609 buf[6] = 0x01; /* Only track is number 1 */ 1610 store_cdrom_address(&buf[8], msf, 0); 1611 1612 buf[13] = 0x16; /* Lead-out track is data */ 1613 buf[14] = 0xAA; /* Lead-out track number */ 1614 store_cdrom_address(&buf[16], msf, curlun->num_sectors); 1615 return 20; 1616} 1617 1618 1619static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1620{ 1621 struct fsg_lun *curlun = fsg->curlun; 1622 int mscmnd = fsg->cmnd[0]; 1623 u8 *buf = (u8 *) bh->buf; 1624 u8 *buf0 = buf; 1625 int pc, page_code; 1626 int changeable_values, all_pages; 1627 int valid_page = 0; 1628 int len, limit; 1629 1630 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD 1631 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1632 return -EINVAL; 1633 } 1634 pc = fsg->cmnd[2] >> 6; 1635 page_code = fsg->cmnd[2] & 0x3f; 1636 if (pc == 3) { 1637 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; 1638 return -EINVAL; 1639 } 1640 changeable_values = (pc == 1); 1641 all_pages = (page_code == 0x3f); 1642 1643 /* Write the mode parameter header. Fixed values are: default 1644 * medium type, no cache control (DPOFUA), and no block descriptors. 1645 * The only variable value is the WriteProtect bit. We will fill in 1646 * the mode data length later. */ 1647 memset(buf, 0, 8); 1648 if (mscmnd == SC_MODE_SENSE_6) { 1649 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 1650 buf += 4; 1651 limit = 255; 1652 } else { // SC_MODE_SENSE_10 1653 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA 1654 buf += 8; 1655 limit = 65535; // Should really be mod_data.buflen 1656 } 1657 1658 /* No block descriptors */ 1659 1660 /* The mode pages, in numerical order. The only page we support 1661 * is the Caching page. */ 1662 if (page_code == 0x08 || all_pages) { 1663 valid_page = 1; 1664 buf[0] = 0x08; // Page code 1665 buf[1] = 10; // Page length 1666 memset(buf+2, 0, 10); // None of the fields are changeable 1667 1668 if (!changeable_values) { 1669 buf[2] = 0x04; // Write cache enable, 1670 // Read cache not disabled 1671 // No cache retention priorities 1672 put_unaligned_be16(0xffff, &buf[4]); 1673 /* Don't disable prefetch */ 1674 /* Minimum prefetch = 0 */ 1675 put_unaligned_be16(0xffff, &buf[8]); 1676 /* Maximum prefetch */ 1677 put_unaligned_be16(0xffff, &buf[10]); 1678 /* Maximum prefetch ceiling */ 1679 } 1680 buf += 12; 1681 } 1682 1683 /* Check that a valid page was requested and the mode data length 1684 * isn't too long. */ 1685 len = buf - buf0; 1686 if (!valid_page || len > limit) { 1687 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1688 return -EINVAL; 1689 } 1690 1691 /* Store the mode data length */ 1692 if (mscmnd == SC_MODE_SENSE_6) 1693 buf0[0] = len - 1; 1694 else 1695 put_unaligned_be16(len - 2, buf0); 1696 return len; 1697} 1698 1699 1700static int do_start_stop(struct fsg_dev *fsg) 1701{ 1702 struct fsg_lun *curlun = fsg->curlun; 1703 int loej, start; 1704 1705 if (!mod_data.removable) { 1706 curlun->sense_data = SS_INVALID_COMMAND; 1707 return -EINVAL; 1708 } 1709 1710 // int immed = fsg->cmnd[1] & 0x01; 1711 loej = fsg->cmnd[4] & 0x02; 1712 start = fsg->cmnd[4] & 0x01; 1713 1714#ifdef CONFIG_USB_FILE_STORAGE_TEST 1715 if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed 1716 (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start 1717 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1718 return -EINVAL; 1719 } 1720 1721 if (!start) { 1722 1723 /* Are we allowed to unload the media? */ 1724 if (curlun->prevent_medium_removal) { 1725 LDBG(curlun, "unload attempt prevented\n"); 1726 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; 1727 return -EINVAL; 1728 } 1729 if (loej) { // Simulate an unload/eject 1730 up_read(&fsg->filesem); 1731 down_write(&fsg->filesem); 1732 fsg_lun_close(curlun); 1733 up_write(&fsg->filesem); 1734 down_read(&fsg->filesem); 1735 } 1736 } else { 1737 1738 /* Our emulation doesn't support mounting; the medium is 1739 * available for use as soon as it is loaded. */ 1740 if (!fsg_lun_is_open(curlun)) { 1741 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1742 return -EINVAL; 1743 } 1744 } 1745#endif 1746 return 0; 1747} 1748 1749 1750static int do_prevent_allow(struct fsg_dev *fsg) 1751{ 1752 struct fsg_lun *curlun = fsg->curlun; 1753 int prevent; 1754 1755 if (!mod_data.removable) { 1756 curlun->sense_data = SS_INVALID_COMMAND; 1757 return -EINVAL; 1758 } 1759 1760 prevent = fsg->cmnd[4] & 0x01; 1761 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent 1762 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1763 return -EINVAL; 1764 } 1765 1766 if (curlun->prevent_medium_removal && !prevent) 1767 fsg_lun_fsync_sub(curlun); 1768 curlun->prevent_medium_removal = prevent; 1769 return 0; 1770} 1771 1772 1773static int do_read_format_capacities(struct fsg_dev *fsg, 1774 struct fsg_buffhd *bh) 1775{ 1776 struct fsg_lun *curlun = fsg->curlun; 1777 u8 *buf = (u8 *) bh->buf; 1778 1779 buf[0] = buf[1] = buf[2] = 0; 1780 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor 1781 buf += 4; 1782 1783 put_unaligned_be32(curlun->num_sectors, &buf[0]); 1784 /* Number of blocks */ 1785 put_unaligned_be32(512, &buf[4]); /* Block length */ 1786 buf[4] = 0x02; /* Current capacity */ 1787 return 12; 1788} 1789 1790 1791static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1792{ 1793 struct fsg_lun *curlun = fsg->curlun; 1794 1795 /* We don't support MODE SELECT */ 1796 curlun->sense_data = SS_INVALID_COMMAND; 1797 return -EINVAL; 1798} 1799 1800 1801/*-------------------------------------------------------------------------*/ 1802 1803static int halt_bulk_in_endpoint(struct fsg_dev *fsg) 1804{ 1805 int rc; 1806 1807 rc = fsg_set_halt(fsg, fsg->bulk_in); 1808 if (rc == -EAGAIN) 1809 VDBG(fsg, "delayed bulk-in endpoint halt\n"); 1810 while (rc != 0) { 1811 if (rc != -EAGAIN) { 1812 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc); 1813 rc = 0; 1814 break; 1815 } 1816 1817 /* Wait for a short time and then try again */ 1818 if (msleep_interruptible(100) != 0) 1819 return -EINTR; 1820 rc = usb_ep_set_halt(fsg->bulk_in); 1821 } 1822 return rc; 1823} 1824 1825static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) 1826{ 1827 int rc; 1828 1829 DBG(fsg, "bulk-in set wedge\n"); 1830 rc = usb_ep_set_wedge(fsg->bulk_in); 1831 if (rc == -EAGAIN) 1832 VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 1833 while (rc != 0) { 1834 if (rc != -EAGAIN) { 1835 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); 1836 rc = 0; 1837 break; 1838 } 1839 1840 /* Wait for a short time and then try again */ 1841 if (msleep_interruptible(100) != 0) 1842 return -EINTR; 1843 rc = usb_ep_set_wedge(fsg->bulk_in); 1844 } 1845 return rc; 1846} 1847 1848static int pad_with_zeros(struct fsg_dev *fsg) 1849{ 1850 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 1851 u32 nkeep = bh->inreq->length; 1852 u32 nsend; 1853 int rc; 1854 1855 bh->state = BUF_STATE_EMPTY; // For the first iteration 1856 fsg->usb_amount_left = nkeep + fsg->residue; 1857 while (fsg->usb_amount_left > 0) { 1858 1859 /* Wait for the next buffer to be free */ 1860 while (bh->state != BUF_STATE_EMPTY) { 1861 rc = sleep_thread(fsg); 1862 if (rc) 1863 return rc; 1864 } 1865 1866 nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen); 1867 memset(bh->buf + nkeep, 0, nsend - nkeep); 1868 bh->inreq->length = nsend; 1869 bh->inreq->zero = 0; 1870 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1871 &bh->inreq_busy, &bh->state); 1872 bh = fsg->next_buffhd_to_fill = bh->next; 1873 fsg->usb_amount_left -= nsend; 1874 nkeep = 0; 1875 } 1876 return 0; 1877} 1878 1879static int throw_away_data(struct fsg_dev *fsg) 1880{ 1881 struct fsg_buffhd *bh; 1882 u32 amount; 1883 int rc; 1884 1885 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY || 1886 fsg->usb_amount_left > 0) { 1887 1888 /* Throw away the data in a filled buffer */ 1889 if (bh->state == BUF_STATE_FULL) { 1890 smp_rmb(); 1891 bh->state = BUF_STATE_EMPTY; 1892 fsg->next_buffhd_to_drain = bh->next; 1893 1894 /* A short packet or an error ends everything */ 1895 if (bh->outreq->actual != bh->outreq->length || 1896 bh->outreq->status != 0) { 1897 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); 1898 return -EINTR; 1899 } 1900 continue; 1901 } 1902 1903 /* Try to submit another request if we need one */ 1904 bh = fsg->next_buffhd_to_fill; 1905 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) { 1906 amount = min(fsg->usb_amount_left, 1907 (u32) mod_data.buflen); 1908 1909 /* amount is always divisible by 512, hence by 1910 * the bulk-out maxpacket size */ 1911 bh->outreq->length = bh->bulk_out_intended_length = 1912 amount; 1913 bh->outreq->short_not_ok = 1; 1914 start_transfer(fsg, fsg->bulk_out, bh->outreq, 1915 &bh->outreq_busy, &bh->state); 1916 fsg->next_buffhd_to_fill = bh->next; 1917 fsg->usb_amount_left -= amount; 1918 continue; 1919 } 1920 1921 /* Otherwise wait for something to happen */ 1922 rc = sleep_thread(fsg); 1923 if (rc) 1924 return rc; 1925 } 1926 return 0; 1927} 1928 1929 1930static int finish_reply(struct fsg_dev *fsg) 1931{ 1932 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 1933 int rc = 0; 1934 1935 switch (fsg->data_dir) { 1936 case DATA_DIR_NONE: 1937 break; // Nothing to send 1938 1939 /* If we don't know whether the host wants to read or write, 1940 * this must be CB or CBI with an unknown command. We mustn't 1941 * try to send or receive any data. So stall both bulk pipes 1942 * if we can and wait for a reset. */ 1943 case DATA_DIR_UNKNOWN: 1944 if (mod_data.can_stall) { 1945 fsg_set_halt(fsg, fsg->bulk_out); 1946 rc = halt_bulk_in_endpoint(fsg); 1947 } 1948 break; 1949 1950 /* All but the last buffer of data must have already been sent */ 1951 case DATA_DIR_TO_HOST: 1952 if (fsg->data_size == 0) 1953 ; // Nothing to send 1954 1955 /* If there's no residue, simply send the last buffer */ 1956 else if (fsg->residue == 0) { 1957 bh->inreq->zero = 0; 1958 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1959 &bh->inreq_busy, &bh->state); 1960 fsg->next_buffhd_to_fill = bh->next; 1961 } 1962 1963 /* There is a residue. For CB and CBI, simply mark the end 1964 * of the data with a short packet. However, if we are 1965 * allowed to stall, there was no data at all (residue == 1966 * data_size), and the command failed (invalid LUN or 1967 * sense data is set), then halt the bulk-in endpoint 1968 * instead. */ 1969 else if (!transport_is_bbb()) { 1970 if (mod_data.can_stall && 1971 fsg->residue == fsg->data_size && 1972 (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) { 1973 bh->state = BUF_STATE_EMPTY; 1974 rc = halt_bulk_in_endpoint(fsg); 1975 } else { 1976 bh->inreq->zero = 1; 1977 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1978 &bh->inreq_busy, &bh->state); 1979 fsg->next_buffhd_to_fill = bh->next; 1980 } 1981 } 1982 1983 /* For Bulk-only, if we're allowed to stall then send the 1984 * short packet and halt the bulk-in endpoint. If we can't 1985 * stall, pad out the remaining data with 0's. */ 1986 else { 1987 if (mod_data.can_stall) { 1988 bh->inreq->zero = 1; 1989 start_transfer(fsg, fsg->bulk_in, bh->inreq, 1990 &bh->inreq_busy, &bh->state); 1991 fsg->next_buffhd_to_fill = bh->next; 1992 rc = halt_bulk_in_endpoint(fsg); 1993 } else 1994 rc = pad_with_zeros(fsg); 1995 } 1996 break; 1997 1998 /* We have processed all we want from the data the host has sent. 1999 * There may still be outstanding bulk-out requests. */ 2000 case DATA_DIR_FROM_HOST: 2001 if (fsg->residue == 0) 2002 ; // Nothing to receive 2003 2004 /* Did the host stop sending unexpectedly early? */ 2005 else if (fsg->short_packet_received) { 2006 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT); 2007 rc = -EINTR; 2008 } 2009 2010 /* We haven't processed all the incoming data. Even though 2011 * we may be allowed to stall, doing so would cause a race. 2012 * The controller may already have ACK'ed all the remaining 2013 * bulk-out packets, in which case the host wouldn't see a 2014 * STALL. Not realizing the endpoint was halted, it wouldn't 2015 * clear the halt -- leading to problems later on. */ 2016 2017 /* We can't stall. Read in the excess data and throw it 2018 * all away. */ 2019 else 2020 rc = throw_away_data(fsg); 2021 break; 2022 } 2023 return rc; 2024} 2025 2026 2027static int send_status(struct fsg_dev *fsg) 2028{ 2029 struct fsg_lun *curlun = fsg->curlun; 2030 struct fsg_buffhd *bh; 2031 int rc; 2032 u8 status = USB_STATUS_PASS; 2033 u32 sd, sdinfo = 0; 2034 2035 /* Wait for the next buffer to become available */ 2036 bh = fsg->next_buffhd_to_fill; 2037 while (bh->state != BUF_STATE_EMPTY) { 2038 rc = sleep_thread(fsg); 2039 if (rc) 2040 return rc; 2041 } 2042 2043 if (curlun) { 2044 sd = curlun->sense_data; 2045 sdinfo = curlun->sense_data_info; 2046 } else if (fsg->bad_lun_okay) 2047 sd = SS_NO_SENSE; 2048 else 2049 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 2050 2051 if (fsg->phase_error) { 2052 DBG(fsg, "sending phase-error status\n"); 2053 status = USB_STATUS_PHASE_ERROR; 2054 sd = SS_INVALID_COMMAND; 2055 } else if (sd != SS_NO_SENSE) { 2056 DBG(fsg, "sending command-failure status\n"); 2057 status = USB_STATUS_FAIL; 2058 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" 2059 " info x%x\n", 2060 SK(sd), ASC(sd), ASCQ(sd), sdinfo); 2061 } 2062 2063 if (transport_is_bbb()) { 2064 struct bulk_cs_wrap *csw = bh->buf; 2065 2066 /* Store and send the Bulk-only CSW */ 2067 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG); 2068 csw->Tag = fsg->tag; 2069 csw->Residue = cpu_to_le32(fsg->residue); 2070 csw->Status = status; 2071 2072 bh->inreq->length = USB_BULK_CS_WRAP_LEN; 2073 bh->inreq->zero = 0; 2074 start_transfer(fsg, fsg->bulk_in, bh->inreq, 2075 &bh->inreq_busy, &bh->state); 2076 2077 } else if (mod_data.transport_type == USB_PR_CB) { 2078 2079 /* Control-Bulk transport has no status phase! */ 2080 return 0; 2081 2082 } else { // USB_PR_CBI 2083 struct interrupt_data *buf = bh->buf; 2084 2085 /* Store and send the Interrupt data. UFI sends the ASC 2086 * and ASCQ bytes. Everything else sends a Type (which 2087 * is always 0) and the status Value. */ 2088 if (mod_data.protocol_type == USB_SC_UFI) { 2089 buf->bType = ASC(sd); 2090 buf->bValue = ASCQ(sd); 2091 } else { 2092 buf->bType = 0; 2093 buf->bValue = status; 2094 } 2095 fsg->intreq->length = CBI_INTERRUPT_DATA_LEN; 2096 2097 fsg->intr_buffhd = bh; // Point to the right buffhd 2098 fsg->intreq->buf = bh->inreq->buf; 2099 fsg->intreq->context = bh; 2100 start_transfer(fsg, fsg->intr_in, fsg->intreq, 2101 &fsg->intreq_busy, &bh->state); 2102 } 2103 2104 fsg->next_buffhd_to_fill = bh->next; 2105 return 0; 2106} 2107 2108 2109/*-------------------------------------------------------------------------*/ 2110 2111/* Check whether the command is properly formed and whether its data size 2112 * and direction agree with the values we already have. */ 2113static int check_command(struct fsg_dev *fsg, int cmnd_size, 2114 enum data_direction data_dir, unsigned int mask, 2115 int needs_medium, const char *name) 2116{ 2117 int i; 2118 int lun = fsg->cmnd[1] >> 5; 2119 static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 2120 char hdlen[20]; 2121 struct fsg_lun *curlun; 2122 2123 /* Adjust the expected cmnd_size for protocol encapsulation padding. 2124 * Transparent SCSI doesn't pad. */ 2125 if (protocol_is_scsi()) 2126 ; 2127 2128 /* There's some disagreement as to whether RBC pads commands or not. 2129 * We'll play it safe and accept either form. */ 2130 else if (mod_data.protocol_type == USB_SC_RBC) { 2131 if (fsg->cmnd_size == 12) 2132 cmnd_size = 12; 2133 2134 /* All the other protocols pad to 12 bytes */ 2135 } else 2136 cmnd_size = 12; 2137 2138 hdlen[0] = 0; 2139 if (fsg->data_dir != DATA_DIR_UNKNOWN) 2140 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir], 2141 fsg->data_size); 2142 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 2143 name, cmnd_size, dirletter[(int) data_dir], 2144 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen); 2145 2146 /* We can't reply at all until we know the correct data direction 2147 * and size. */ 2148 if (fsg->data_size_from_cmnd == 0) 2149 data_dir = DATA_DIR_NONE; 2150 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI 2151 fsg->data_dir = data_dir; 2152 fsg->data_size = fsg->data_size_from_cmnd; 2153 2154 } else { // Bulk-only 2155 if (fsg->data_size < fsg->data_size_from_cmnd) { 2156 2157 /* Host data size < Device data size is a phase error. 2158 * Carry out the command, but only transfer as much 2159 * as we are allowed. */ 2160 fsg->data_size_from_cmnd = fsg->data_size; 2161 fsg->phase_error = 1; 2162 } 2163 } 2164 fsg->residue = fsg->usb_amount_left = fsg->data_size; 2165 2166 /* Conflicting data directions is a phase error */ 2167 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) { 2168 fsg->phase_error = 1; 2169 return -EINVAL; 2170 } 2171 2172 /* Verify the length of the command itself */ 2173 if (cmnd_size != fsg->cmnd_size) { 2174 2175 if (cmnd_size <= fsg->cmnd_size) { 2176 DBG(fsg, "%s is buggy! Expected length %d " 2177 "but we got %d\n", name, 2178 cmnd_size, fsg->cmnd_size); 2179 cmnd_size = fsg->cmnd_size; 2180 } else { 2181 fsg->phase_error = 1; 2182 return -EINVAL; 2183 } 2184 } 2185 2186 /* Check that the LUN values are consistent */ 2187 if (transport_is_bbb()) { 2188 if (fsg->lun != lun) 2189 DBG(fsg, "using LUN %d from CBW, " 2190 "not LUN %d from CDB\n", 2191 fsg->lun, lun); 2192 } else 2193 fsg->lun = lun; // Use LUN from the command 2194 2195 /* Check the LUN */ 2196 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) { 2197 fsg->curlun = curlun = &fsg->luns[fsg->lun]; 2198 if (fsg->cmnd[0] != SC_REQUEST_SENSE) { 2199 curlun->sense_data = SS_NO_SENSE; 2200 curlun->sense_data_info = 0; 2201 curlun->info_valid = 0; 2202 } 2203 } else { 2204 fsg->curlun = curlun = NULL; 2205 fsg->bad_lun_okay = 0; 2206 2207 /* INQUIRY and REQUEST SENSE commands are explicitly allowed 2208 * to use unsupported LUNs; all others may not. */ 2209 if (fsg->cmnd[0] != SC_INQUIRY && 2210 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2211 DBG(fsg, "unsupported LUN %d\n", fsg->lun); 2212 return -EINVAL; 2213 } 2214 } 2215 2216 /* If a unit attention condition exists, only INQUIRY and 2217 * REQUEST SENSE commands are allowed; anything else must fail. */ 2218 if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 2219 fsg->cmnd[0] != SC_INQUIRY && 2220 fsg->cmnd[0] != SC_REQUEST_SENSE) { 2221 curlun->sense_data = curlun->unit_attention_data; 2222 curlun->unit_attention_data = SS_NO_SENSE; 2223 return -EINVAL; 2224 } 2225 2226 /* Check that only command bytes listed in the mask are non-zero */ 2227 fsg->cmnd[1] &= 0x1f; // Mask away the LUN 2228 for (i = 1; i < cmnd_size; ++i) { 2229 if (fsg->cmnd[i] && !(mask & (1 << i))) { 2230 if (curlun) 2231 curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 2232 return -EINVAL; 2233 } 2234 } 2235 2236 /* If the medium isn't mounted and the command needs to access 2237 * it, return an error. */ 2238 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { 2239 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 2240 return -EINVAL; 2241 } 2242 2243 return 0; 2244} 2245 2246 2247static int do_scsi_command(struct fsg_dev *fsg) 2248{ 2249 struct fsg_buffhd *bh; 2250 int rc; 2251 int reply = -EINVAL; 2252 int i; 2253 static char unknown[16]; 2254 2255 dump_cdb(fsg); 2256 2257 /* Wait for the next buffer to become available for data or status */ 2258 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill; 2259 while (bh->state != BUF_STATE_EMPTY) { 2260 rc = sleep_thread(fsg); 2261 if (rc) 2262 return rc; 2263 } 2264 fsg->phase_error = 0; 2265 fsg->short_packet_received = 0; 2266 2267 down_read(&fsg->filesem); // We're using the backing file 2268 switch (fsg->cmnd[0]) { 2269 2270 case SC_INQUIRY: 2271 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2272 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2273 (1<<4), 0, 2274 "INQUIRY")) == 0) 2275 reply = do_inquiry(fsg, bh); 2276 break; 2277 2278 case SC_MODE_SELECT_6: 2279 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2280 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2281 (1<<1) | (1<<4), 0, 2282 "MODE SELECT(6)")) == 0) 2283 reply = do_mode_select(fsg, bh); 2284 break; 2285 2286 case SC_MODE_SELECT_10: 2287 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2288 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2289 (1<<1) | (3<<7), 0, 2290 "MODE SELECT(10)")) == 0) 2291 reply = do_mode_select(fsg, bh); 2292 break; 2293 2294 case SC_MODE_SENSE_6: 2295 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2296 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2297 (1<<1) | (1<<2) | (1<<4), 0, 2298 "MODE SENSE(6)")) == 0) 2299 reply = do_mode_sense(fsg, bh); 2300 break; 2301 2302 case SC_MODE_SENSE_10: 2303 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2304 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2305 (1<<1) | (1<<2) | (3<<7), 0, 2306 "MODE SENSE(10)")) == 0) 2307 reply = do_mode_sense(fsg, bh); 2308 break; 2309 2310 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL: 2311 fsg->data_size_from_cmnd = 0; 2312 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2313 (1<<4), 0, 2314 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0) 2315 reply = do_prevent_allow(fsg); 2316 break; 2317 2318 case SC_READ_6: 2319 i = fsg->cmnd[4]; 2320 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2321 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2322 (7<<1) | (1<<4), 1, 2323 "READ(6)")) == 0) 2324 reply = do_read(fsg); 2325 break; 2326 2327 case SC_READ_10: 2328 fsg->data_size_from_cmnd = 2329 get_unaligned_be16(&fsg->cmnd[7]) << 9; 2330 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2331 (1<<1) | (0xf<<2) | (3<<7), 1, 2332 "READ(10)")) == 0) 2333 reply = do_read(fsg); 2334 break; 2335 2336 case SC_READ_12: 2337 fsg->data_size_from_cmnd = 2338 get_unaligned_be32(&fsg->cmnd[6]) << 9; 2339 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST, 2340 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2341 "READ(12)")) == 0) 2342 reply = do_read(fsg); 2343 break; 2344 2345 case SC_READ_CAPACITY: 2346 fsg->data_size_from_cmnd = 8; 2347 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2348 (0xf<<2) | (1<<8), 1, 2349 "READ CAPACITY")) == 0) 2350 reply = do_read_capacity(fsg, bh); 2351 break; 2352 2353 case SC_READ_HEADER: 2354 if (!mod_data.cdrom) 2355 goto unknown_cmnd; 2356 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2357 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2358 (3<<7) | (0x1f<<1), 1, 2359 "READ HEADER")) == 0) 2360 reply = do_read_header(fsg, bh); 2361 break; 2362 2363 case SC_READ_TOC: 2364 if (!mod_data.cdrom) 2365 goto unknown_cmnd; 2366 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2367 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2368 (7<<6) | (1<<1), 1, 2369 "READ TOC")) == 0) 2370 reply = do_read_toc(fsg, bh); 2371 break; 2372 2373 case SC_READ_FORMAT_CAPACITIES: 2374 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]); 2375 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2376 (3<<7), 1, 2377 "READ FORMAT CAPACITIES")) == 0) 2378 reply = do_read_format_capacities(fsg, bh); 2379 break; 2380 2381 case SC_REQUEST_SENSE: 2382 fsg->data_size_from_cmnd = fsg->cmnd[4]; 2383 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST, 2384 (1<<4), 0, 2385 "REQUEST SENSE")) == 0) 2386 reply = do_request_sense(fsg, bh); 2387 break; 2388 2389 case SC_START_STOP_UNIT: 2390 fsg->data_size_from_cmnd = 0; 2391 if ((reply = check_command(fsg, 6, DATA_DIR_NONE, 2392 (1<<1) | (1<<4), 0, 2393 "START-STOP UNIT")) == 0) 2394 reply = do_start_stop(fsg); 2395 break; 2396 2397 case SC_SYNCHRONIZE_CACHE: 2398 fsg->data_size_from_cmnd = 0; 2399 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2400 (0xf<<2) | (3<<7), 1, 2401 "SYNCHRONIZE CACHE")) == 0) 2402 reply = do_synchronize_cache(fsg); 2403 break; 2404 2405 case SC_TEST_UNIT_READY: 2406 fsg->data_size_from_cmnd = 0; 2407 reply = check_command(fsg, 6, DATA_DIR_NONE, 2408 0, 1, 2409 "TEST UNIT READY"); 2410 break; 2411 2412 /* Although optional, this command is used by MS-Windows. We 2413 * support a minimal version: BytChk must be 0. */ 2414 case SC_VERIFY: 2415 fsg->data_size_from_cmnd = 0; 2416 if ((reply = check_command(fsg, 10, DATA_DIR_NONE, 2417 (1<<1) | (0xf<<2) | (3<<7), 1, 2418 "VERIFY")) == 0) 2419 reply = do_verify(fsg); 2420 break; 2421 2422 case SC_WRITE_6: 2423 i = fsg->cmnd[4]; 2424 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9; 2425 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST, 2426 (7<<1) | (1<<4), 1, 2427 "WRITE(6)")) == 0) 2428 reply = do_write(fsg); 2429 break; 2430 2431 case SC_WRITE_10: 2432 fsg->data_size_from_cmnd = 2433 get_unaligned_be16(&fsg->cmnd[7]) << 9; 2434 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2435 (1<<1) | (0xf<<2) | (3<<7), 1, 2436 "WRITE(10)")) == 0) 2437 reply = do_write(fsg); 2438 break; 2439 2440 case SC_WRITE_12: 2441 fsg->data_size_from_cmnd = 2442 get_unaligned_be32(&fsg->cmnd[6]) << 9; 2443 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST, 2444 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2445 "WRITE(12)")) == 0) 2446 reply = do_write(fsg); 2447 break; 2448 2449 /* Some mandatory commands that we recognize but don't implement. 2450 * They don't mean much in this setting. It's left as an exercise 2451 * for anyone interested to implement RESERVE and RELEASE in terms 2452 * of Posix locks. */ 2453 case SC_FORMAT_UNIT: 2454 case SC_RELEASE: 2455 case SC_RESERVE: 2456 case SC_SEND_DIAGNOSTIC: 2457 // Fall through 2458 2459 default: 2460 unknown_cmnd: 2461 fsg->data_size_from_cmnd = 0; 2462 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); 2463 if ((reply = check_command(fsg, fsg->cmnd_size, 2464 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { 2465 fsg->curlun->sense_data = SS_INVALID_COMMAND; 2466 reply = -EINVAL; 2467 } 2468 break; 2469 } 2470 up_read(&fsg->filesem); 2471 2472 if (reply == -EINTR || signal_pending(current)) 2473 return -EINTR; 2474 2475 /* Set up the single reply buffer for finish_reply() */ 2476 if (reply == -EINVAL) 2477 reply = 0; // Error reply length 2478 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) { 2479 reply = min((u32) reply, fsg->data_size_from_cmnd); 2480 bh->inreq->length = reply; 2481 bh->state = BUF_STATE_FULL; 2482 fsg->residue -= reply; 2483 } // Otherwise it's already set 2484 2485 return 0; 2486} 2487 2488 2489/*-------------------------------------------------------------------------*/ 2490 2491static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2492{ 2493 struct usb_request *req = bh->outreq; 2494 struct fsg_bulk_cb_wrap *cbw = req->buf; 2495 2496 /* Was this a real packet? Should it be ignored? */ 2497 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2498 return -EINVAL; 2499 2500 /* Is the CBW valid? */ 2501 if (req->actual != USB_BULK_CB_WRAP_LEN || 2502 cbw->Signature != cpu_to_le32( 2503 USB_BULK_CB_SIG)) { 2504 DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2505 req->actual, 2506 le32_to_cpu(cbw->Signature)); 2507 2508 /* The Bulk-only spec says we MUST stall the IN endpoint 2509 * (6.6.1), so it's unavoidable. It also says we must 2510 * retain this state until the next reset, but there's 2511 * no way to tell the controller driver it should ignore 2512 * Clear-Feature(HALT) requests. 2513 * 2514 * We aren't required to halt the OUT endpoint; instead 2515 * we can simply accept and discard any data received 2516 * until the next reset. */ 2517 wedge_bulk_in_endpoint(fsg); 2518 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2519 return -EINVAL; 2520 } 2521 2522 /* Is the CBW meaningful? */ 2523 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG || 2524 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { 2525 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2526 "cmdlen %u\n", 2527 cbw->Lun, cbw->Flags, cbw->Length); 2528 2529 /* We can do anything we want here, so let's stall the 2530 * bulk pipes if we are allowed to. */ 2531 if (mod_data.can_stall) { 2532 fsg_set_halt(fsg, fsg->bulk_out); 2533 halt_bulk_in_endpoint(fsg); 2534 } 2535 return -EINVAL; 2536 } 2537 2538 /* Save the command for later */ 2539 fsg->cmnd_size = cbw->Length; 2540 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size); 2541 if (cbw->Flags & USB_BULK_IN_FLAG) 2542 fsg->data_dir = DATA_DIR_TO_HOST; 2543 else 2544 fsg->data_dir = DATA_DIR_FROM_HOST; 2545 fsg->data_size = le32_to_cpu(cbw->DataTransferLength); 2546 if (fsg->data_size == 0) 2547 fsg->data_dir = DATA_DIR_NONE; 2548 fsg->lun = cbw->Lun; 2549 fsg->tag = cbw->Tag; 2550 return 0; 2551} 2552 2553 2554static int get_next_command(struct fsg_dev *fsg) 2555{ 2556 struct fsg_buffhd *bh; 2557 int rc = 0; 2558 2559 if (transport_is_bbb()) { 2560 2561 /* Wait for the next buffer to become available */ 2562 bh = fsg->next_buffhd_to_fill; 2563 while (bh->state != BUF_STATE_EMPTY) { 2564 rc = sleep_thread(fsg); 2565 if (rc) 2566 return rc; 2567 } 2568 2569 /* Queue a request to read a Bulk-only CBW */ 2570 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN); 2571 bh->outreq->short_not_ok = 1; 2572 start_transfer(fsg, fsg->bulk_out, bh->outreq, 2573 &bh->outreq_busy, &bh->state); 2574 2575 /* We will drain the buffer in software, which means we 2576 * can reuse it for the next filling. No need to advance 2577 * next_buffhd_to_fill. */ 2578 2579 /* Wait for the CBW to arrive */ 2580 while (bh->state != BUF_STATE_FULL) { 2581 rc = sleep_thread(fsg); 2582 if (rc) 2583 return rc; 2584 } 2585 smp_rmb(); 2586 rc = received_cbw(fsg, bh); 2587 bh->state = BUF_STATE_EMPTY; 2588 2589 } else { // USB_PR_CB or USB_PR_CBI 2590 2591 /* Wait for the next command to arrive */ 2592 while (fsg->cbbuf_cmnd_size == 0) { 2593 rc = sleep_thread(fsg); 2594 if (rc) 2595 return rc; 2596 } 2597 2598 /* Is the previous status interrupt request still busy? 2599 * The host is allowed to skip reading the status, 2600 * so we must cancel it. */ 2601 if (fsg->intreq_busy) 2602 usb_ep_dequeue(fsg->intr_in, fsg->intreq); 2603 2604 /* Copy the command and mark the buffer empty */ 2605 fsg->data_dir = DATA_DIR_UNKNOWN; 2606 spin_lock_irq(&fsg->lock); 2607 fsg->cmnd_size = fsg->cbbuf_cmnd_size; 2608 memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size); 2609 fsg->cbbuf_cmnd_size = 0; 2610 spin_unlock_irq(&fsg->lock); 2611 } 2612 return rc; 2613} 2614 2615 2616/*-------------------------------------------------------------------------*/ 2617 2618static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep, 2619 const struct usb_endpoint_descriptor *d) 2620{ 2621 int rc; 2622 2623 ep->driver_data = fsg; 2624 rc = usb_ep_enable(ep, d); 2625 if (rc) 2626 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc); 2627 return rc; 2628} 2629 2630static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep, 2631 struct usb_request **preq) 2632{ 2633 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 2634 if (*preq) 2635 return 0; 2636 ERROR(fsg, "can't allocate request for %s\n", ep->name); 2637 return -ENOMEM; 2638} 2639 2640/* 2641 * Reset interface setting and re-init endpoint state (toggle etc). 2642 * Call with altsetting < 0 to disable the interface. The only other 2643 * available altsetting is 0, which enables the interface. 2644 */ 2645static int do_set_interface(struct fsg_dev *fsg, int altsetting) 2646{ 2647 int rc = 0; 2648 int i; 2649 const struct usb_endpoint_descriptor *d; 2650 2651 if (fsg->running) 2652 DBG(fsg, "reset interface\n"); 2653 2654reset: 2655 /* Deallocate the requests */ 2656 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2657 struct fsg_buffhd *bh = &fsg->buffhds[i]; 2658 2659 if (bh->inreq) { 2660 usb_ep_free_request(fsg->bulk_in, bh->inreq); 2661 bh->inreq = NULL; 2662 } 2663 if (bh->outreq) { 2664 usb_ep_free_request(fsg->bulk_out, bh->outreq); 2665 bh->outreq = NULL; 2666 } 2667 } 2668 if (fsg->intreq) { 2669 usb_ep_free_request(fsg->intr_in, fsg->intreq); 2670 fsg->intreq = NULL; 2671 } 2672 2673 /* Disable the endpoints */ 2674 if (fsg->bulk_in_enabled) { 2675 usb_ep_disable(fsg->bulk_in); 2676 fsg->bulk_in_enabled = 0; 2677 } 2678 if (fsg->bulk_out_enabled) { 2679 usb_ep_disable(fsg->bulk_out); 2680 fsg->bulk_out_enabled = 0; 2681 } 2682 if (fsg->intr_in_enabled) { 2683 usb_ep_disable(fsg->intr_in); 2684 fsg->intr_in_enabled = 0; 2685 } 2686 2687 fsg->running = 0; 2688 if (altsetting < 0 || rc != 0) 2689 return rc; 2690 2691 DBG(fsg, "set interface %d\n", altsetting); 2692 2693 /* Enable the endpoints */ 2694 d = fsg_ep_desc(fsg->gadget, 2695 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc); 2696 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0) 2697 goto reset; 2698 fsg->bulk_in_enabled = 1; 2699 2700 d = fsg_ep_desc(fsg->gadget, 2701 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc); 2702 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0) 2703 goto reset; 2704 fsg->bulk_out_enabled = 1; 2705 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); 2706 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2707 2708 if (transport_is_cbi()) { 2709 d = fsg_ep_desc(fsg->gadget, 2710 &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc); 2711 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0) 2712 goto reset; 2713 fsg->intr_in_enabled = 1; 2714 } 2715 2716 /* Allocate the requests */ 2717 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2718 struct fsg_buffhd *bh = &fsg->buffhds[i]; 2719 2720 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0) 2721 goto reset; 2722 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0) 2723 goto reset; 2724 bh->inreq->buf = bh->outreq->buf = bh->buf; 2725 bh->inreq->context = bh->outreq->context = bh; 2726 bh->inreq->complete = bulk_in_complete; 2727 bh->outreq->complete = bulk_out_complete; 2728 } 2729 if (transport_is_cbi()) { 2730 if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0) 2731 goto reset; 2732 fsg->intreq->complete = intr_in_complete; 2733 } 2734 2735 fsg->running = 1; 2736 for (i = 0; i < fsg->nluns; ++i) 2737 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2738 return rc; 2739} 2740 2741 2742/* 2743 * Change our operational configuration. This code must agree with the code 2744 * that returns config descriptors, and with interface altsetting code. 2745 * 2746 * It's also responsible for power management interactions. Some 2747 * configurations might not work with our current power sources. 2748 * For now we just assume the gadget is always self-powered. 2749 */ 2750static int do_set_config(struct fsg_dev *fsg, u8 new_config) 2751{ 2752 int rc = 0; 2753 2754 /* Disable the single interface */ 2755 if (fsg->config != 0) { 2756 DBG(fsg, "reset config\n"); 2757 fsg->config = 0; 2758 rc = do_set_interface(fsg, -1); 2759 } 2760 2761 /* Enable the interface */ 2762 if (new_config != 0) { 2763 fsg->config = new_config; 2764 if ((rc = do_set_interface(fsg, 0)) != 0) 2765 fsg->config = 0; // Reset on errors 2766 else { 2767 char *speed; 2768 2769 switch (fsg->gadget->speed) { 2770 case USB_SPEED_LOW: speed = "low"; break; 2771 case USB_SPEED_FULL: speed = "full"; break; 2772 case USB_SPEED_HIGH: speed = "high"; break; 2773 default: speed = "?"; break; 2774 } 2775 INFO(fsg, "%s speed config #%d\n", speed, fsg->config); 2776 } 2777 } 2778 return rc; 2779} 2780 2781 2782/*-------------------------------------------------------------------------*/ 2783 2784static void handle_exception(struct fsg_dev *fsg) 2785{ 2786 siginfo_t info; 2787 int sig; 2788 int i; 2789 int num_active; 2790 struct fsg_buffhd *bh; 2791 enum fsg_state old_state; 2792 u8 new_config; 2793 struct fsg_lun *curlun; 2794 unsigned int exception_req_tag; 2795 int rc; 2796 2797 /* Clear the existing signals. Anything but SIGUSR1 is converted 2798 * into a high-priority EXIT exception. */ 2799 for (;;) { 2800 sig = dequeue_signal_lock(current, ¤t->blocked, &info); 2801 if (!sig) 2802 break; 2803 if (sig != SIGUSR1) { 2804 if (fsg->state < FSG_STATE_EXIT) 2805 DBG(fsg, "Main thread exiting on signal\n"); 2806 raise_exception(fsg, FSG_STATE_EXIT); 2807 } 2808 } 2809 2810 /* Cancel all the pending transfers */ 2811 if (fsg->intreq_busy) 2812 usb_ep_dequeue(fsg->intr_in, fsg->intreq); 2813 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2814 bh = &fsg->buffhds[i]; 2815 if (bh->inreq_busy) 2816 usb_ep_dequeue(fsg->bulk_in, bh->inreq); 2817 if (bh->outreq_busy) 2818 usb_ep_dequeue(fsg->bulk_out, bh->outreq); 2819 } 2820 2821 /* Wait until everything is idle */ 2822 for (;;) { 2823 num_active = fsg->intreq_busy; 2824 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2825 bh = &fsg->buffhds[i]; 2826 num_active += bh->inreq_busy + bh->outreq_busy; 2827 } 2828 if (num_active == 0) 2829 break; 2830 if (sleep_thread(fsg)) 2831 return; 2832 } 2833 2834 /* Clear out the controller's fifos */ 2835 if (fsg->bulk_in_enabled) 2836 usb_ep_fifo_flush(fsg->bulk_in); 2837 if (fsg->bulk_out_enabled) 2838 usb_ep_fifo_flush(fsg->bulk_out); 2839 if (fsg->intr_in_enabled) 2840 usb_ep_fifo_flush(fsg->intr_in); 2841 2842 /* Reset the I/O buffer states and pointers, the SCSI 2843 * state, and the exception. Then invoke the handler. */ 2844 spin_lock_irq(&fsg->lock); 2845 2846 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 2847 bh = &fsg->buffhds[i]; 2848 bh->state = BUF_STATE_EMPTY; 2849 } 2850 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain = 2851 &fsg->buffhds[0]; 2852 2853 exception_req_tag = fsg->exception_req_tag; 2854 new_config = fsg->new_config; 2855 old_state = fsg->state; 2856 2857 if (old_state == FSG_STATE_ABORT_BULK_OUT) 2858 fsg->state = FSG_STATE_STATUS_PHASE; 2859 else { 2860 for (i = 0; i < fsg->nluns; ++i) { 2861 curlun = &fsg->luns[i]; 2862 curlun->prevent_medium_removal = 0; 2863 curlun->sense_data = curlun->unit_attention_data = 2864 SS_NO_SENSE; 2865 curlun->sense_data_info = 0; 2866 curlun->info_valid = 0; 2867 } 2868 fsg->state = FSG_STATE_IDLE; 2869 } 2870 spin_unlock_irq(&fsg->lock); 2871 2872 /* Carry out any extra actions required for the exception */ 2873 switch (old_state) { 2874 default: 2875 break; 2876 2877 case FSG_STATE_ABORT_BULK_OUT: 2878 send_status(fsg); 2879 spin_lock_irq(&fsg->lock); 2880 if (fsg->state == FSG_STATE_STATUS_PHASE) 2881 fsg->state = FSG_STATE_IDLE; 2882 spin_unlock_irq(&fsg->lock); 2883 break; 2884 2885 case FSG_STATE_RESET: 2886 /* In case we were forced against our will to halt a 2887 * bulk endpoint, clear the halt now. (The SuperH UDC 2888 * requires this.) */ 2889 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2890 usb_ep_clear_halt(fsg->bulk_in); 2891 2892 if (transport_is_bbb()) { 2893 if (fsg->ep0_req_tag == exception_req_tag) 2894 ep0_queue(fsg); // Complete the status stage 2895 2896 } else if (transport_is_cbi()) 2897 send_status(fsg); // Status by interrupt pipe 2898 2899 /* Technically this should go here, but it would only be 2900 * a waste of time. Ditto for the INTERFACE_CHANGE and 2901 * CONFIG_CHANGE cases. */ 2902 // for (i = 0; i < fsg->nluns; ++i) 2903 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2904 break; 2905 2906 case FSG_STATE_INTERFACE_CHANGE: 2907 rc = do_set_interface(fsg, 0); 2908 if (fsg->ep0_req_tag != exception_req_tag) 2909 break; 2910 if (rc != 0) // STALL on errors 2911 fsg_set_halt(fsg, fsg->ep0); 2912 else // Complete the status stage 2913 ep0_queue(fsg); 2914 break; 2915 2916 case FSG_STATE_CONFIG_CHANGE: 2917 rc = do_set_config(fsg, new_config); 2918 if (fsg->ep0_req_tag != exception_req_tag) 2919 break; 2920 if (rc != 0) // STALL on errors 2921 fsg_set_halt(fsg, fsg->ep0); 2922 else // Complete the status stage 2923 ep0_queue(fsg); 2924 break; 2925 2926 case FSG_STATE_DISCONNECT: 2927 for (i = 0; i < fsg->nluns; ++i) 2928 fsg_lun_fsync_sub(fsg->luns + i); 2929 do_set_config(fsg, 0); // Unconfigured state 2930 break; 2931 2932 case FSG_STATE_EXIT: 2933 case FSG_STATE_TERMINATED: 2934 do_set_config(fsg, 0); // Free resources 2935 spin_lock_irq(&fsg->lock); 2936 fsg->state = FSG_STATE_TERMINATED; // Stop the thread 2937 spin_unlock_irq(&fsg->lock); 2938 break; 2939 } 2940} 2941 2942 2943/*-------------------------------------------------------------------------*/ 2944 2945static int fsg_main_thread(void *fsg_) 2946{ 2947 struct fsg_dev *fsg = fsg_; 2948 2949 /* Allow the thread to be killed by a signal, but set the signal mask 2950 * to block everything but INT, TERM, KILL, and USR1. */ 2951 allow_signal(SIGINT); 2952 allow_signal(SIGTERM); 2953 allow_signal(SIGKILL); 2954 allow_signal(SIGUSR1); 2955 2956 /* Allow the thread to be frozen */ 2957 set_freezable(); 2958 2959 /* Arrange for userspace references to be interpreted as kernel 2960 * pointers. That way we can pass a kernel pointer to a routine 2961 * that expects a __user pointer and it will work okay. */ 2962 set_fs(get_ds()); 2963 2964 /* The main loop */ 2965 while (fsg->state != FSG_STATE_TERMINATED) { 2966 if (exception_in_progress(fsg) || signal_pending(current)) { 2967 handle_exception(fsg); 2968 continue; 2969 } 2970 2971 if (!fsg->running) { 2972 sleep_thread(fsg); 2973 continue; 2974 } 2975 2976 if (get_next_command(fsg)) 2977 continue; 2978 2979 spin_lock_irq(&fsg->lock); 2980 if (!exception_in_progress(fsg)) 2981 fsg->state = FSG_STATE_DATA_PHASE; 2982 spin_unlock_irq(&fsg->lock); 2983 2984 if (do_scsi_command(fsg) || finish_reply(fsg)) 2985 continue; 2986 2987 spin_lock_irq(&fsg->lock); 2988 if (!exception_in_progress(fsg)) 2989 fsg->state = FSG_STATE_STATUS_PHASE; 2990 spin_unlock_irq(&fsg->lock); 2991 2992 if (send_status(fsg)) 2993 continue; 2994 2995 spin_lock_irq(&fsg->lock); 2996 if (!exception_in_progress(fsg)) 2997 fsg->state = FSG_STATE_IDLE; 2998 spin_unlock_irq(&fsg->lock); 2999 } 3000 3001 spin_lock_irq(&fsg->lock); 3002 fsg->thread_task = NULL; 3003 spin_unlock_irq(&fsg->lock); 3004 3005 /* If we are exiting because of a signal, unregister the 3006 * gadget driver. */ 3007 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) 3008 usb_gadget_unregister_driver(&fsg_driver); 3009 3010 /* Let the unbind and cleanup routines know the thread has exited */ 3011 complete_and_exit(&fsg->thread_notifier, 0); 3012} 3013 3014 3015/*-------------------------------------------------------------------------*/ 3016 3017 3018/* The write permissions and store_xxx pointers are set in fsg_bind() */ 3019static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL); 3020static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL); 3021static DEVICE_ATTR(file, 0444, fsg_show_file, NULL); 3022 3023 3024/*-------------------------------------------------------------------------*/ 3025 3026static void fsg_release(struct kref *ref) 3027{ 3028 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref); 3029 3030 kfree(fsg->luns); 3031 kfree(fsg); 3032} 3033 3034static void lun_release(struct device *dev) 3035{ 3036 struct rw_semaphore *filesem = dev_get_drvdata(dev); 3037 struct fsg_dev *fsg = 3038 container_of(filesem, struct fsg_dev, filesem); 3039 3040 kref_put(&fsg->ref, fsg_release); 3041} 3042 3043static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget) 3044{ 3045 struct fsg_dev *fsg = get_gadget_data(gadget); 3046 int i; 3047 struct fsg_lun *curlun; 3048 struct usb_request *req = fsg->ep0req; 3049 3050 DBG(fsg, "unbind\n"); 3051 clear_bit(REGISTERED, &fsg->atomic_bitflags); 3052 3053 /* Unregister the sysfs attribute files and the LUNs */ 3054 for (i = 0; i < fsg->nluns; ++i) { 3055 curlun = &fsg->luns[i]; 3056 if (curlun->registered) { 3057 device_remove_file(&curlun->dev, &dev_attr_ro); 3058 device_remove_file(&curlun->dev, &dev_attr_file); 3059 fsg_lun_close(curlun); 3060 device_unregister(&curlun->dev); 3061 curlun->registered = 0; 3062 } 3063 } 3064 3065 /* If the thread isn't already dead, tell it to exit now */ 3066 if (fsg->state != FSG_STATE_TERMINATED) { 3067 raise_exception(fsg, FSG_STATE_EXIT); 3068 wait_for_completion(&fsg->thread_notifier); 3069 3070 /* The cleanup routine waits for this completion also */ 3071 complete(&fsg->thread_notifier); 3072 } 3073 3074 /* Free the data buffers */ 3075 for (i = 0; i < FSG_NUM_BUFFERS; ++i) 3076 kfree(fsg->buffhds[i].buf); 3077 3078 /* Free the request and buffer for endpoint 0 */ 3079 if (req) { 3080 kfree(req->buf); 3081 usb_ep_free_request(fsg->ep0, req); 3082 } 3083 3084 set_gadget_data(gadget, NULL); 3085} 3086 3087 3088static int __init check_parameters(struct fsg_dev *fsg) 3089{ 3090 int prot; 3091 int gcnum; 3092 int i; 3093 3094 /* Store the default values */ 3095 mod_data.transport_type = USB_PR_BULK; 3096 mod_data.transport_name = "Bulk-only"; 3097 mod_data.protocol_type = USB_SC_SCSI; 3098 mod_data.protocol_name = "Transparent SCSI"; 3099 3100 /* Some peripheral controllers are known not to be able to 3101 * halt bulk endpoints correctly. If one of them is present, 3102 * disable stalls. 3103 */ 3104 if (gadget_is_at91(fsg->gadget)) 3105 mod_data.can_stall = 0; 3106 3107 if (mod_data.release == 0xffff) { // Parameter wasn't set 3108 gcnum = usb_gadget_controller_number(fsg->gadget); 3109 if (gcnum >= 0) 3110 mod_data.release = 0x0300 + gcnum; 3111 else { 3112 WARNING(fsg, "controller '%s' not recognized\n", 3113 fsg->gadget->name); 3114 mod_data.release = 0x0399; 3115 } 3116 } 3117 3118 prot = simple_strtol(mod_data.protocol_parm, NULL, 0); 3119 3120#ifdef CONFIG_USB_FILE_STORAGE_TEST 3121 if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) { 3122 ; // Use default setting 3123 } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) { 3124 mod_data.transport_type = USB_PR_CB; 3125 mod_data.transport_name = "Control-Bulk"; 3126 } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) { 3127 mod_data.transport_type = USB_PR_CBI; 3128 mod_data.transport_name = "Control-Bulk-Interrupt"; 3129 } else { 3130 ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm); 3131 return -EINVAL; 3132 } 3133 3134 if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 || 3135 prot == USB_SC_SCSI) { 3136 ; // Use default setting 3137 } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 || 3138 prot == USB_SC_RBC) { 3139 mod_data.protocol_type = USB_SC_RBC; 3140 mod_data.protocol_name = "RBC"; 3141 } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 || 3142 strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 || 3143 prot == USB_SC_8020) { 3144 mod_data.protocol_type = USB_SC_8020; 3145 mod_data.protocol_name = "8020i (ATAPI)"; 3146 } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 || 3147 prot == USB_SC_QIC) { 3148 mod_data.protocol_type = USB_SC_QIC; 3149 mod_data.protocol_name = "QIC-157"; 3150 } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 || 3151 prot == USB_SC_UFI) { 3152 mod_data.protocol_type = USB_SC_UFI; 3153 mod_data.protocol_name = "UFI"; 3154 } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 || 3155 prot == USB_SC_8070) { 3156 mod_data.protocol_type = USB_SC_8070; 3157 mod_data.protocol_name = "8070i"; 3158 } else { 3159 ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm); 3160 return -EINVAL; 3161 } 3162 3163 mod_data.buflen &= PAGE_CACHE_MASK; 3164 if (mod_data.buflen <= 0) { 3165 ERROR(fsg, "invalid buflen\n"); 3166 return -ETOOSMALL; 3167 } 3168 3169#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 3170 3171 /* Serial string handling. 3172 * On a real device, the serial string would be loaded 3173 * from permanent storage. */ 3174 if (mod_data.serial) { 3175 const char *ch; 3176 unsigned len = 0; 3177 3178 /* Sanity check : 3179 * The CB[I] specification limits the serial string to 3180 * 12 uppercase hexadecimal characters. 3181 * BBB need at least 12 uppercase hexadecimal characters, 3182 * with a maximum of 126. */ 3183 for (ch = mod_data.serial; *ch; ++ch) { 3184 ++len; 3185 if ((*ch < '0' || *ch > '9') && 3186 (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */ 3187 WARNING(fsg, 3188 "Invalid serial string character: %c; " 3189 "Failing back to default\n", 3190 *ch); 3191 goto fill_serial; 3192 } 3193 } 3194 if (len > 126 || 3195 (mod_data.transport_type == USB_PR_BULK && len < 12) || 3196 (mod_data.transport_type != USB_PR_BULK && len > 12)) { 3197 WARNING(fsg, 3198 "Invalid serial string length; " 3199 "Failing back to default\n"); 3200 goto fill_serial; 3201 } 3202 fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial; 3203 } else { 3204 WARNING(fsg, 3205 "Userspace failed to provide serial number; " 3206 "Failing back to default\n"); 3207fill_serial: 3208 /* Serial number not specified or invalid, make our own. 3209 * We just encode it from the driver version string, 3210 * 12 characters to comply with both CB[I] and BBB spec. 3211 * Warning : Two devices running the same kernel will have 3212 * the same fallback serial number. */ 3213 for (i = 0; i < 12; i += 2) { 3214 unsigned char c = DRIVER_VERSION[i / 2]; 3215 3216 if (!c) 3217 break; 3218 sprintf(&fsg_string_serial[i], "%02X", c); 3219 } 3220 } 3221 3222 return 0; 3223} 3224 3225 3226static int __ref fsg_bind(struct usb_gadget *gadget) 3227{ 3228 struct fsg_dev *fsg = the_fsg; 3229 int rc; 3230 int i; 3231 struct fsg_lun *curlun; 3232 struct usb_ep *ep; 3233 struct usb_request *req; 3234 char *pathbuf, *p; 3235 3236 fsg->gadget = gadget; 3237 set_gadget_data(gadget, fsg); 3238 fsg->ep0 = gadget->ep0; 3239 fsg->ep0->driver_data = fsg; 3240 3241 if ((rc = check_parameters(fsg)) != 0) 3242 goto out; 3243 3244 if (mod_data.removable) { // Enable the store_xxx attributes 3245 dev_attr_file.attr.mode = 0644; 3246 dev_attr_file.store = fsg_store_file; 3247 if (!mod_data.cdrom) { 3248 dev_attr_ro.attr.mode = 0644; 3249 dev_attr_ro.store = fsg_store_ro; 3250 } 3251 } 3252 3253 /* Only for removable media? */ 3254 dev_attr_nofua.attr.mode = 0644; 3255 dev_attr_nofua.store = fsg_store_nofua; 3256 3257 /* Find out how many LUNs there should be */ 3258 i = mod_data.nluns; 3259 if (i == 0) 3260 i = max(mod_data.num_filenames, 1u); 3261 if (i > FSG_MAX_LUNS) { 3262 ERROR(fsg, "invalid number of LUNs: %d\n", i); 3263 rc = -EINVAL; 3264 goto out; 3265 } 3266 3267 /* Create the LUNs, open their backing files, and register the 3268 * LUN devices in sysfs. */ 3269 fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL); 3270 if (!fsg->luns) { 3271 rc = -ENOMEM; 3272 goto out; 3273 } 3274 fsg->nluns = i; 3275 3276 for (i = 0; i < fsg->nluns; ++i) { 3277 curlun = &fsg->luns[i]; 3278 curlun->cdrom = !!mod_data.cdrom; 3279 curlun->ro = mod_data.cdrom || mod_data.ro[i]; 3280 curlun->initially_ro = curlun->ro; 3281 curlun->removable = mod_data.removable; 3282 curlun->nofua = mod_data.nofua[i]; 3283 curlun->dev.release = lun_release; 3284 curlun->dev.parent = &gadget->dev; 3285 curlun->dev.driver = &fsg_driver.driver; 3286 dev_set_drvdata(&curlun->dev, &fsg->filesem); 3287 dev_set_name(&curlun->dev,"%s-lun%d", 3288 dev_name(&gadget->dev), i); 3289 3290 if ((rc = device_register(&curlun->dev)) != 0) { 3291 INFO(fsg, "failed to register LUN%d: %d\n", i, rc); 3292 goto out; 3293 } 3294 if ((rc = device_create_file(&curlun->dev, 3295 &dev_attr_ro)) != 0 || 3296 (rc = device_create_file(&curlun->dev, 3297 &dev_attr_nofua)) != 0 || 3298 (rc = device_create_file(&curlun->dev, 3299 &dev_attr_file)) != 0) { 3300 device_unregister(&curlun->dev); 3301 goto out; 3302 } 3303 curlun->registered = 1; 3304 kref_get(&fsg->ref); 3305 3306 if (mod_data.file[i] && *mod_data.file[i]) { 3307 if ((rc = fsg_lun_open(curlun, 3308 mod_data.file[i])) != 0) 3309 goto out; 3310 } else if (!mod_data.removable) { 3311 ERROR(fsg, "no file given for LUN%d\n", i); 3312 rc = -EINVAL; 3313 goto out; 3314 } 3315 } 3316 3317 /* Find all the endpoints we will use */ 3318 usb_ep_autoconfig_reset(gadget); 3319 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); 3320 if (!ep) 3321 goto autoconf_fail; 3322 ep->driver_data = fsg; // claim the endpoint 3323 fsg->bulk_in = ep; 3324 3325 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); 3326 if (!ep) 3327 goto autoconf_fail; 3328 ep->driver_data = fsg; // claim the endpoint 3329 fsg->bulk_out = ep; 3330 3331 if (transport_is_cbi()) { 3332 ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc); 3333 if (!ep) 3334 goto autoconf_fail; 3335 ep->driver_data = fsg; // claim the endpoint 3336 fsg->intr_in = ep; 3337 } 3338 3339 /* Fix up the descriptors */ 3340 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket; 3341 device_desc.idVendor = cpu_to_le16(mod_data.vendor); 3342 device_desc.idProduct = cpu_to_le16(mod_data.product); 3343 device_desc.bcdDevice = cpu_to_le16(mod_data.release); 3344 3345 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints 3346 fsg_intf_desc.bNumEndpoints = i; 3347 fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type; 3348 fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type; 3349 fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3350 3351 if (gadget_is_dualspeed(gadget)) { 3352 fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3353 3354 /* Assume ep0 uses the same maxpacket value for both speeds */ 3355 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket; 3356 3357 /* Assume endpoint addresses are the same for both speeds */ 3358 fsg_hs_bulk_in_desc.bEndpointAddress = 3359 fsg_fs_bulk_in_desc.bEndpointAddress; 3360 fsg_hs_bulk_out_desc.bEndpointAddress = 3361 fsg_fs_bulk_out_desc.bEndpointAddress; 3362 fsg_hs_intr_in_desc.bEndpointAddress = 3363 fsg_fs_intr_in_desc.bEndpointAddress; 3364 } 3365 3366 if (gadget_is_otg(gadget)) 3367 fsg_otg_desc.bmAttributes |= USB_OTG_HNP; 3368 3369 rc = -ENOMEM; 3370 3371 /* Allocate the request and buffer for endpoint 0 */ 3372 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL); 3373 if (!req) 3374 goto out; 3375 req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL); 3376 if (!req->buf) 3377 goto out; 3378 req->complete = ep0_complete; 3379 3380 /* Allocate the data buffers */ 3381 for (i = 0; i < FSG_NUM_BUFFERS; ++i) { 3382 struct fsg_buffhd *bh = &fsg->buffhds[i]; 3383 3384 /* Allocate for the bulk-in endpoint. We assume that 3385 * the buffer will also work with the bulk-out (and 3386 * interrupt-in) endpoint. */ 3387 bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL); 3388 if (!bh->buf) 3389 goto out; 3390 bh->next = bh + 1; 3391 } 3392 fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0]; 3393 3394 /* This should reflect the actual gadget power source */ 3395 usb_gadget_set_selfpowered(gadget); 3396 3397 snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer, 3398 "%s %s with %s", 3399 init_utsname()->sysname, init_utsname()->release, 3400 gadget->name); 3401 3402 fsg->thread_task = kthread_create(fsg_main_thread, fsg, 3403 "file-storage-gadget"); 3404 if (IS_ERR(fsg->thread_task)) { 3405 rc = PTR_ERR(fsg->thread_task); 3406 goto out; 3407 } 3408 3409 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n"); 3410 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns); 3411 3412 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 3413 for (i = 0; i < fsg->nluns; ++i) { 3414 curlun = &fsg->luns[i]; 3415 if (fsg_lun_is_open(curlun)) { 3416 p = NULL; 3417 if (pathbuf) { 3418 p = d_path(&curlun->filp->f_path, 3419 pathbuf, PATH_MAX); 3420 if (IS_ERR(p)) 3421 p = NULL; 3422 } 3423 LINFO(curlun, "ro=%d, nofua=%d, file: %s\n", 3424 curlun->ro, curlun->nofua, (p ? p : "(error)")); 3425 } 3426 } 3427 kfree(pathbuf); 3428 3429 DBG(fsg, "transport=%s (x%02x)\n", 3430 mod_data.transport_name, mod_data.transport_type); 3431 DBG(fsg, "protocol=%s (x%02x)\n", 3432 mod_data.protocol_name, mod_data.protocol_type); 3433 DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n", 3434 mod_data.vendor, mod_data.product, mod_data.release); 3435 DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n", 3436 mod_data.removable, mod_data.can_stall, 3437 mod_data.cdrom, mod_data.buflen); 3438 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task)); 3439 3440 set_bit(REGISTERED, &fsg->atomic_bitflags); 3441 3442 /* Tell the thread to start working */ 3443 wake_up_process(fsg->thread_task); 3444 return 0; 3445 3446autoconf_fail: 3447 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 3448 rc = -ENOTSUPP; 3449 3450out: 3451 fsg->state = FSG_STATE_TERMINATED; // The thread is dead 3452 fsg_unbind(gadget); 3453 complete(&fsg->thread_notifier); 3454 return rc; 3455} 3456 3457 3458/*-------------------------------------------------------------------------*/ 3459 3460static void fsg_suspend(struct usb_gadget *gadget) 3461{ 3462 struct fsg_dev *fsg = get_gadget_data(gadget); 3463 3464 DBG(fsg, "suspend\n"); 3465 set_bit(SUSPENDED, &fsg->atomic_bitflags); 3466} 3467 3468static void fsg_resume(struct usb_gadget *gadget) 3469{ 3470 struct fsg_dev *fsg = get_gadget_data(gadget); 3471 3472 DBG(fsg, "resume\n"); 3473 clear_bit(SUSPENDED, &fsg->atomic_bitflags); 3474} 3475 3476 3477/*-------------------------------------------------------------------------*/ 3478 3479static struct usb_gadget_driver fsg_driver = { 3480#ifdef CONFIG_USB_GADGET_DUALSPEED 3481 .speed = USB_SPEED_HIGH, 3482#else 3483 .speed = USB_SPEED_FULL, 3484#endif 3485 .function = (char *) fsg_string_product, 3486 .bind = fsg_bind, 3487 .unbind = fsg_unbind, 3488 .disconnect = fsg_disconnect, 3489 .setup = fsg_setup, 3490 .suspend = fsg_suspend, 3491 .resume = fsg_resume, 3492 3493 .driver = { 3494 .name = DRIVER_NAME, 3495 .owner = THIS_MODULE, 3496 // .release = ... 3497 // .suspend = ... 3498 // .resume = ... 3499 }, 3500}; 3501 3502 3503static int __init fsg_alloc(void) 3504{ 3505 struct fsg_dev *fsg; 3506 3507 fsg = kzalloc(sizeof *fsg, GFP_KERNEL); 3508 if (!fsg) 3509 return -ENOMEM; 3510 spin_lock_init(&fsg->lock); 3511 init_rwsem(&fsg->filesem); 3512 kref_init(&fsg->ref); 3513 init_completion(&fsg->thread_notifier); 3514 3515 the_fsg = fsg; 3516 return 0; 3517} 3518 3519 3520static int __init fsg_init(void) 3521{ 3522 int rc; 3523 struct fsg_dev *fsg; 3524 3525 if ((rc = fsg_alloc()) != 0) 3526 return rc; 3527 fsg = the_fsg; 3528 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0) 3529 kref_put(&fsg->ref, fsg_release); 3530 return rc; 3531} 3532module_init(fsg_init); 3533 3534 3535static void __exit fsg_cleanup(void) 3536{ 3537 struct fsg_dev *fsg = the_fsg; 3538 3539 /* Unregister the driver iff the thread hasn't already done so */ 3540 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) 3541 usb_gadget_unregister_driver(&fsg_driver); 3542 3543 /* Wait for the thread to finish up */ 3544 wait_for_completion(&fsg->thread_notifier); 3545 3546 kref_put(&fsg->ref, fsg_release); 3547} 3548module_exit(fsg_cleanup); 3549