#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180
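/* Major 180 is the block major number assigned to USB storage devices. */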

/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 7 */
	u8	Lun;			/* LUN */
	u8	Length;			/* length of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0
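/*
 * Note: the Bulk-Only Transport carries the CBW direction in bit 7 of
 * Flags. This driver writes 0x80 directly for reads in ub_scsi_cmd_start()
 * rather than using the flag constants above.
 */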

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;		/* should = 'USBS' */
	u32	Tag;			/* same as original command */
	__le32	Residue;		/* amount not transferred */
	u8	Status;			/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe
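/*
 * These are the two class-specific control requests defined by the USB
 * Mass Storage Bulk-Only Transport: 0xff (Bulk-Only Mass Storage Reset)
 * resets the device's transport state, 0xfe (Get Max LUN) returns the
 * highest LUN number the device supports.
 */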

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG	9	/* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	enum ub_scsi_cmd_state state;
	unsigned int tag;
	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status. */

	unsigned int len;		/* Requested length */
	unsigned int current_sg;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	struct ub_lun *lun;
	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
	void *back;
};
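/*
 * Note on ->back: for regular read/write commands it points to the
 * ub_request (set in ub_request_fn_1), while for the auto-issued
 * REQUEST SENSE it points back to the command being sensed (see
 * ub_state_sense and ub_top_sense_done).
 */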

struct ub_request {
	struct request *rq;
	unsigned int current_try;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
	unsigned long nsec;		/* Linux size - 512 byte sectors */
	unsigned int bsize;		/* Linux hardsect_size */
	unsigned int bshift;		/* Shift between 512 and hard sects */
};

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
	unsigned int done;
	spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)	((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&x->lock, flags);
	ret = x->done;
	spin_unlock_irqrestore(&x->lock, flags);
	return ret;
}
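/*
 * Typical flow: ub_urb_complete() calls ub_complete() from the URB
 * callback, and the tasklet polls ub_is_completed() before touching
 * work_urb again, so nothing ever sleeps on this structure.
 */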

/*
 */
struct ub_scsi_cmd_queue {
	int qlen, qmax;
	struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
	struct ub_dev *udev;
	struct list_head link;
	struct gendisk *disk;
	int id;				/* Host index */
	int num;			/* LUN number */
	char name[16];

	int changed;			/* Media was changed */
	int removable;
	int readonly;

	struct ub_request urq;

	/* Use Ingo's mempool if or when we have more than one command. */
	/*
	 * Currently we never need more than one command for the whole device.
	 * However, giving every LUN a command is a cheap and automatic way
	 * to enforce fairness between them.
	 */
	int cmda[1];
	struct ub_scsi_cmd cmdv[1];

	struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
	spinlock_t *lock;
	atomic_t poison;		/* The USB device is disconnected */
	int openc;			/* protected by ub_lock! */
					/* kref is too implicit for our taste */
	int reset;			/* Reset is running */
	unsigned int tagcnt;
	char name[12];
	struct usb_device *dev;
	struct usb_interface *intf;

	struct list_head luns;

	unsigned int send_bulk_pipe;	/* cached pipe values */
	unsigned int recv_bulk_pipe;
	unsigned int send_ctrl_pipe;
	unsigned int recv_ctrl_pipe;

	struct tasklet_struct tasklet;

	struct ub_scsi_cmd_queue cmd_queue;
	struct ub_scsi_cmd top_rqs_cmd;	/* REQUEST SENSE */
	unsigned char top_sense[UB_SENSE_SIZE];

	struct ub_completion work_done;
	struct urb work_urb;
	struct timer_list work_timer;
	int last_pipe;			/* What might need clearing */
	__le32 signature;		/* Learned signature */
	struct bulk_cb_wrap work_bcb;
	struct bulk_cs_wrap work_bcs;
	struct usb_ctrlrequest work_cr;

	struct work_struct reset_work;
	wait_queue_head_t reset_wait;

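	/* Histogram of request S/G counts: buckets 0..4, plus "5 or more". */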
	int sg_stat[6];
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
	{ }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	for (i = 0; i < UB_MAX_HOSTS; i++) {
		if (ub_hostv[i] == 0) {
			ub_hostv[i] = 1;
			spin_unlock_irqrestore(&ub_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&ub_lock, flags);
	return -1;
}

static void ub_id_put(int id)
{
	unsigned long flags;

	if (id < 0 || id >= UB_MAX_HOSTS) {
		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
		return;
	}

	spin_lock_irqsave(&ub_lock, flags);
	if (ub_hostv[id] == 0) {
		spin_unlock_irqrestore(&ub_lock, flags);
		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
		return;
	}
	ub_hostv[id] = 0;
	spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
	unsigned long flags;
	spinlock_t *ret;

	spin_lock_irqsave(&ub_lock, flags);
	ret = &ub_qlockv[ub_qlock_next];
	ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
	spin_unlock_irqrestore(&ub_lock, flags);
	return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		ub_cleanup(sc);
	} else {
		spin_unlock_irqrestore(&ub_lock, flags);
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	request_queue_t *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
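
/*
 * Note the two queue disciplines above: ub_cmdq_add() appends at the
 * tail for normal submission, while ub_cmdq_insert() pushes at the head,
 * which ub_state_sense() uses so that an auto-issued REQUEST SENSE runs
 * ahead of everything else queued.
 */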

/*
 * The request function is our main entry point
 */

static void ub_request_fn(request_queue_t *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	if (atomic_read(&sc->poison)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	if (lun->changed && !blk_pc_request(rq)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blkdev_dequeue_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;
	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

	if (blk_pc_request(rq)) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * build the command
	 *
	 * The call to blk_queue_hardsect_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = rq->nr_sectors * 512;
}

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (rq->data_len == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = rq->data_len;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
		}
		scsi_status = 0;
	} else {
		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			if (cmd->error == -EIO) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
	int uptodate;

	if (scsi_status == 0) {
		uptodate = 1;
	} else {
		uptodate = 0;
		rq->errors = scsi_status;
	}
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

	if (atomic_read(&sc->poison))
		return -ENXIO;

	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

	ub_cmdq_add(sc, cmd);
	return 0;
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	/* Fill what we shouldn't be filling, because usb-storage did so. */
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;
			del_timer(&sc->work_timer);
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int len;
	int rc;

	if (atomic_read(&sc->poison)) {
		ub_state_done(sc, cmd, -ENODEV);
		return;
	}

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLRRS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_stat_counted(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		switch (urb->status) {
		case 0:
			break;
		case -EOVERFLOW:
			goto Bad_End;
		case -EPIPE:
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLEAR;
			return;
		case -ESHUTDOWN:	/* unplug */
		case -EILSEQ:		/* unplug timeout on uhci */
			ub_state_done(sc, cmd, -ENODEV);
			return;
		default:
			goto Bad_End;
		}
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			goto Bad_End;
		}

		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
			ub_state_stat(sc, cmd);
			return;
		}

		// udelay(125);		// usb-storage has this
		ub_data_start(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLR2STS;
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * A babble? Failure, but we must transfer CSW now.
			 */
			cmd->error = -EOVERFLOW;	/* A cheap trick... */
			ub_state_stat(sc, cmd);
			return;
		}

		if (cmd->dir == UB_DIR_WRITE) {
			/*
			 * Do not continue writes in case of a failure.
			 * Doing so would cause sectors to be mixed up,
			 * which is worse than sectors lost.
			 *
			 * We must try to read the CSW, or many devices
			 * get confused.
			 */
			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				cmd->act_len += len;

				cmd->error = -EIO;
				ub_state_stat(sc, cmd);
				return;
			}

		} else {
			/*
			 * If an error occurs on read, we record it, and
			 * continue to fetch data in order to avoid a bubble.
			 *
			 * As a small shortcut, we stop if we detect that
			 * a CSW was mixed into the data.
			 */
			if (urb->status != 0)
				cmd->error = -EIO;

			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
					goto Bad_End;
			}
		}

		cmd->act_len += urb->actual_length;

		if (++cmd->current_sg < cmd->nsg) {
			ub_data_start(sc, cmd);
			return;
		}
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}

			/*
			 * Having a stall when getting CSW is an error, so
			 * make sure upper levels are not oblivious to it.
			 */
			cmd->error = -EIO;		/* A cheap trick... */

			cmd->state = UB_CMDST_CLRRS;
			return;
		}

		/* Catch everything, including -EOVERFLOW and other nasties. */
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			ub_state_stat_counted(sc, cmd);
			return;
		}

		/*
		 * Check the returned Bulk protocol status.
		 * The status block has to be validated first.
		 */

		bcs = &sc->work_bcs;

		if (sc->signature == cpu_to_le32(0)) {
			/*
			 * This is the first reply, so do not perform the check.
			 * Instead, remember the signature the device uses
			 * for future checks. But do not allow a nul.
			 */
			sc->signature = bcs->Signature;
			if (sc->signature == cpu_to_le32(0)) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		} else {
			if (bcs->Signature != sc->signature) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		}

		if (bcs->Tag != cmd->tag) {
			/*
			 * This usually happens when we disagree with the
			 * device's microcode about something. For instance,
			 * a few of them throw this after timeouts. They buffer
			 * commands and reply to commands we timed out earlier.
			 * Without flushing these replies we loop forever.
			 */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		len = le32_to_cpu(bcs->Residue);
		if (len != cmd->len - cmd->act_len) {
			/*
			 * It is all right to transfer less, the caller has
			 * to check. But it's not all right if the device
			 * counts disagree with our counts.
			 */
			goto Bad_End;
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);
			return;
		case US_BULK_STAT_PHASE:
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			ub_state_done(sc, cmd, -EINVAL);
			return;
		}

		/* Not zeroing error to preserve a babble indicator */
		if (cmd->error != 0) {
			ub_state_sense(sc, cmd);
			return;
		}
		cmd->state = UB_CMDST_DONE;
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: "
		    "wrong command state %d\n",
		    sc->name, cmd->state);
		ub_state_done(sc, cmd, -EINVAL);
		return;
	}
	return;

Bad_End: /* Little Excel is dead */
	ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
	int pipe;
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	if (cmd->dir == UB_DIR_READ)
		pipe = sc->recv_bulk_pipe;
	else
		pipe = sc->send_bulk_pipe;
	sc->last_pipe = pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
	    page_address(sg->page) + sg->offset, sg->length,
	    ub_urb_complete, sc);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

	cmd->error = rc;
	cmd->state = UB_CMDST_DONE;
	ub_cmdq_pop(sc);
	(*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->recv_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return -1;
	}

	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->stat_count = 0;
	cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (++cmd->stat_count >= 4) {
		ub_state_sense(sc, cmd);
		return;
	}

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd *scmd;
	struct scatterlist *sg;
	int rc;

	if (cmd->cdb[0] == REQUEST_SENSE) {
		rc = -EPIPE;
		goto error;
	}

	scmd = &sc->top_rqs_cmd;
	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
	scmd->cdb[0] = REQUEST_SENSE;
	scmd->cdb[4] = UB_SENSE_SIZE;
	scmd->cdb_len = 6;
	scmd->dir = UB_DIR_READ;
	scmd->state = UB_CMDST_INIT;
	scmd->nsg = 1;
	sg = &scmd->sgv[0];
	sg->page = virt_to_page(sc->top_sense);
	sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
	sg->length = UB_SENSE_SIZE;
	scmd->len = UB_SENSE_SIZE;
	scmd->lun = cmd->lun;
	scmd->done = ub_top_sense_done;
	scmd->back = cmd;

	scmd->tag = sc->tagcnt++;

	cmd->state = UB_CMDST_SENSE;

	ub_cmdq_insert(sc, scmd);
	return;

error:
	ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	int rc;

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein(stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	UB_INIT_COMPLETION(sc->work_done);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}
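
/*
 * Note: CLEAR_FEATURE(ENDPOINT_HALT) is the standard recovery for a bulk
 * stall. Clearing the halt resets the data toggle on the device side, so
 * the completion paths (CLEAR, CLR2STS, CLRRS above) call usb_settoggle()
 * to reset the host-side toggle to match.
 */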

/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = sc->top_sense;
	struct ub_scsi_cmd *cmd;

	/*
	 * Find the command which triggered the unit attention or a check,
	 * save the sense into it, and advance its state machine.
	 */
	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x\n",
		    sc->name, cmd->tag);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: "
		    "sense done with bad cmd state %d\n",
		    sc->name, cmd->state);
		return;
	}

	/*
	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
	 */
	cmd->key = sense[2] & 0x0F;
	cmd->asc = sense[12];
	cmd->ascq = sense[13];

	ub_scsi_urb_compl(sc, cmd);
}

static void ub_reset_enter(struct ub_dev *sc, int try)
{

	if (sc->reset) {
		/* This happens often on multi-LUN devices. */
		return;
	}
	sc->reset = try + 1;

	schedule_work(&sc->reset_work);
}

static void ub_reset_task(struct work_struct *work)
{
	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
	unsigned long flags;
	struct list_head *p;
	struct ub_lun *lun;
	int lkr, rc;

	if (!sc->reset) {
		printk(KERN_WARNING "%s: Running reset unrequested\n",
		    sc->name);
		return;
	}

	if (atomic_read(&sc->poison)) {
		;
	} else if ((sc->reset & 1) == 0) {
		ub_sync_reset(sc);
		msleep(700);	/* usb-storage sleeps 6s (!) */
		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
		;
	} else {
		if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
			printk(KERN_NOTICE
			    "%s: usb_lock_device_for_reset failed (%d)\n",
			    sc->name, lkr);
		} else {
			rc = usb_reset_device(sc->dev);
			if (rc < 0) {
				printk(KERN_NOTICE "%s: "
				    "usb_reset_device failed (%d)\n",
				    sc->name, rc);
			}

			if (lkr)
				usb_unlock_device(sc->dev);
		}
	}

	/*
	 * In theory, no commands can be running while reset is active,
	 * so nobody can ask for another reset, and so we do not need any
	 * queues of resets or anything. We do need a spinlock though,
	 * to interact with the block layer.
	 */
	spin_lock_irqsave(sc->lock, flags);
	sc->reset = 0;
	tasklet_schedule(&sc->tasklet);
	list_for_each(p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		blk_start_queue(lun->disk->queue);
	}
	wake_up(&sc->reset_wait);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(inode->i_bdev);

	/*
	 * The sd.c considers ->media_present and ->changed not equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need it one day.
	 */
	if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

/*
 */
static int ub_bd_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	ub_put(sc);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct inode *inode, struct file *filp,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	void __user *usermem = (void __user *) arg;

	return scsi_cmd_ioctl(filp, disk, cmd, usermem);
}

/*
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We clear checks after every command, so this is not as
	 * dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready, with operator or software
	 * intervention required. One dangerous item might be a drive which
	 * spins itself down, and come the time to write dirty pages, this
	 * will fail, then the block layer discards the data. Since we never
	 * spin drives up, such devices simply cannot be used with ub anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* This may be NULL, but that's ok */
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	rc = cmd->error;

	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 * Read the SCSI capacity synchronously (for probing).
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);

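	/* READ CAPACITY(10): opcode 0x25 returns the last LBA and the block
	 * size as two big-endian 32-bit words, 8 bytes total. */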
	cmd->cdb[0] = 0x25;
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;
	sg = &cmd->sgv[0];
	sg->page = virt_to_page(p);
	sg->offset = (unsigned long)p & (PAGE_SIZE-1);
	sg->length = 8;
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		rc = -EIO;
		goto err_read;
	}

	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 */
static void ub_probe_urb_complete(struct urb *urb)
{
	struct completion *cop = urb->context;
	complete(cop);
}

static void ub_probe_timeout(unsigned long arg)
{
	struct completion *cop = (struct completion *) arg;
	complete(cop);
}

/*
 * Reset with a Bulk reset.
 */
static int ub_sync_reset(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	cr = &sc->work_cr;
	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_RESET_REQUEST;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	return sc->work_urb.status;
}

/*
 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned char *p;
	enum { ALLOC_SIZE = 1 };
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int nluns;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	*p = 55;
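	/* An arbitrary sentinel: if it comes back unchanged below, the
	 * reply is treated as bogus and the LUN count defaults to zero. */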
1957
1958	cr = &sc->work_cr;
1959	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1960	cr->bRequest = US_BULK_GET_MAX_LUN;
1961	cr->wValue = cpu_to_le16(0);
1962	cr->wIndex = cpu_to_le16(ifnum);
1963	cr->wLength = cpu_to_le16(1);
1964
1965	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
1966	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
1967	sc->work_urb.actual_length = 0;
1968	sc->work_urb.error_count = 0;
1969	sc->work_urb.status = 0;
1970
1971	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
1972		goto err_submit;
1973
1974	init_timer(&timer);
1975	timer.function = ub_probe_timeout;
1976	timer.data = (unsigned long) &compl;
1977	timer.expires = jiffies + UB_CTRL_TIMEOUT;
1978	add_timer(&timer);
1979
1980	wait_for_completion(&compl);
1981
1982	del_timer_sync(&timer);
1983	usb_kill_urb(&sc->work_urb);
1984
1985	if ((rc = sc->work_urb.status) < 0)
1986		goto err_io;
1987
1988	if (sc->work_urb.actual_length != 1) {
1989		nluns = 0;
1990	} else {
1991		if ((nluns = *p) == 55) {
1992			nluns = 0;
1993		} else {
1994  			/* GetMaxLUN returns the maximum LUN number */
1995			nluns += 1;
1996			if (nluns > UB_MAX_LUNS)
1997				nluns = UB_MAX_LUNS;
1998		}
1999	}
2000
2001	kfree(p);
2002	return nluns;
2003
2004err_io:
2005err_submit:
2006	kfree(p);
2007err_alloc:
2008	return rc;
2009}
2010
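/*
 * On the wire this is an IN class request: bmRequestType 0xa1,
 * bRequest 0xfe (US_BULK_GET_MAX_LUN), wIndex = interface number,
 * wLength = 1. The single data byte is the highest LUN number, so a
 * reply of 0x03 means four LUNs. Single-LUN devices commonly STALL
 * the request instead, which the caller treats as one LUN.
 */
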
/*
 * Clear an endpoint stall synchronously at probe time.
 */
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein(stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	/* Reset the data toggle on the stalled endpoint, not the last one used. */
	usb_settoggle(sc->dev, usb_pipeendpoint(stalled_pipe),
	    usb_pipeout(stalled_pipe), 0);

	return 0;
}

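/*
 * The resulting setup packet is a standard ClearFeature(ENDPOINT_HALT):
 * bmRequestType 0x02 (endpoint recipient), bRequest 0x01
 * (USB_REQ_CLEAR_FEATURE), wValue 0x0000 (USB_ENDPOINT_HALT), and
 * wIndex set to the endpoint address, e.g. 0x0081 for bulk-in
 * endpoint 1.
 */
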
/*
 * Get the pipe settings.
 */
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
    struct usb_interface *intf)
{
	struct usb_host_interface *altsetting = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL;
	struct usb_endpoint_descriptor *ep_out = NULL;
	struct usb_endpoint_descriptor *ep;
	int i;

	/*
	 * Find the endpoints we need.
	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
	 * We will ignore any others.
	 */
	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
		ep = &altsetting->endpoint[i].desc;

		/* Is it a BULK endpoint? */
		if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				== USB_ENDPOINT_XFER_BULK) {
			/* BULK in or out? */
			if (ep->bEndpointAddress & USB_DIR_IN) {
				if (ep_in == NULL)
					ep_in = ep;
			} else {
				if (ep_out == NULL)
					ep_out = ep;
			}
		}
	}

	if (ep_in == NULL || ep_out == NULL) {
		printk(KERN_NOTICE "%s: failed endpoint check\n",
		    sc->name);
		return -ENODEV;
	}

	/* Calculate and store the pipe values */
	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
		ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
		ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	return 0;
}

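/*
 * For comparison, later kernels provide usb_endpoint_is_bulk_in() and
 * usb_endpoint_is_bulk_out() in <linux/usb/ch9.h>, which fold the
 * attribute and direction tests above into one call. A sketch of an
 * equivalent scan (illustrative only, not used by this driver):
 */
#if 0
static int ub_get_pipes_sketch(struct ub_dev *sc, struct usb_device *dev,
    struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL, *ep_out = NULL, *ep;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		ep = &alt->endpoint[i].desc;
		if (usb_endpoint_is_bulk_in(ep) && ep_in == NULL)
			ep_in = ep;
		else if (usb_endpoint_is_bulk_out(ep) && ep_out == NULL)
			ep_out = ep;
	}
	if (ep_in == NULL || ep_out == NULL)
		return -ENODEV;

	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev, usb_endpoint_num(ep_out));
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, usb_endpoint_num(ep_in));
	return 0;
}
#endif
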
/*
 * Probing is done in the process context, which allows us to cheat
 * and not build a state machine for the discovery.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
		return -ENXIO;

	rc = -ENOMEM;
	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	sc->lock = ub_next_lock();
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);
	INIT_WORK(&sc->reset_work, ub_reset_task);
	init_waitqueue_head(&sc->reset_wait);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;		/* A little yuk, but oh well... */

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;
	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);
	/*
	 * Since we give the interface struct to the block level through
	 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
	 * oopses on close after a disconnect (kernels 2.6.16 and up).
	 */
	usb_get_intf(sc->intf);

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	if (ub_get_pipes(sc, sc->dev, intf) != 0)
		goto err_dev_desc;

	/*
	 * At this point, all USB initialization is done, do upper layer.
	 * We really hate halfway initialized structures, so from the
	 * invariants perspective, this ub_dev is fully constructed at
	 * this point.
	 */

	/*
	 * This is needed to clear toggles. It is a problem only if we do
	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
	 */
	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
	ub_probe_clear_stall(sc, sc->send_bulk_pipe);

	/*
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check causes a USB stall. Our common case code sees it
	 * and clears the check, after which the device is ready for use.
	 * But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command
	 * (the capacity readout) fails. We clear that and retry, but why
	 * cause spurious retries for no reason?
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
	 */
	for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
		if (rc != 0x6) break;	/* Retry only on UNIT ATTENTION */
		msleep(10);
	}

	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0)
			break;
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);
	}
	return 0;

err_dev_desc:
	usb_set_intfdata(intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}

static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	request_queue_t *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	lun->removable = 1;
	lun->changed = 1;		/* ub_revalidate clears only */
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
		goto err_diskalloc;

	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
	blk_queue_max_sectors(q, UB_MAX_SECTORS);
	blk_queue_hardsect_size(q, lun->capacity.bsize);

	lun->disk = disk;
	q->queuedata = lun;
	list_add(&lun->link, &sc->luns);

	set_capacity(disk, lun->capacity.nsec);
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}

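/*
 * Note the unit split above: blk_queue_hardsect_size() is given the
 * device's native block size, while set_capacity() always counts
 * 512-byte sectors, which is why the capacity readout stored nsec
 * pre-shifted (nsec << bshift). For example, 1000 blocks of 2048 bytes
 * give bshift == 2 and nsec == 4000, i.e. 2048000 bytes.
 */
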
static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct list_head *p;
	struct ub_lun *lun;
	unsigned long flags;

	/* Take a reference so a concurrent release cannot free sc under us. */
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Fence stall clearings, operations triggered by unlinkings and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -EILSEQ (-84) upon
	 * disconnect anyway.
	 */
	atomic_set(&sc->poison, 1);

	/*
	 * Wait for reset to end, if any.
	 */
	wait_event(sc->reset_wait, !sc->reset);

	/*
	 * Blow away queued commands.
	 *
	 * Actually, this never works, because before we get here
	 * the HCD terminates outstanding URB(s). It causes our
	 * SCSI command queue to advance, commands fail to submit,
	 * and the whole queue drains. So, we just use this code to
	 * print warnings.
	 */
	spin_lock_irqsave(sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d commands were queued after shutdown\n",
			    sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * Unregister the upper layer.
	 */
	list_for_each (p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		del_gendisk(lun->disk);
		/*
		 * I wish I could do:
		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
		 * As it is, we rely on our internal poisoning and let
		 * the upper levels spin furiously, failing all the I/O.
		 */
	}

	/*
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
	 */
	spin_lock_irqsave(sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * There is virtually no chance that another CPU runs a timeout so
	 * long after ub_urb_complete should have called del_timer, but only
	 * if the HCD didn't forget to deliver a callback on unlink.
	 */
	del_timer_sync(&sc->work_timer);

	/*
	 * At this point there must be no commands coming from anyone
	 * and no URBs left in transit.
	 */

	ub_put(sc);
}

static struct usb_driver ub_driver = {
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
};

static int __init ub_init(void)
{
	int rc;
	int i;

	for (i = 0; i < UB_QLOCK_NUM; i++)
		spin_lock_init(&ub_qlockv[i]);

	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
		goto err_regblkdev;

	if ((rc = usb_register(&ub_driver)) != 0)
		goto err_register;

	usb_usual_set_present(USB_US_TYPE_UB);
	return 0;

err_register:
	unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
	return rc;
}

static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	unregister_blkdev(UB_MAJOR, DRV_NAME);
	usb_usual_clear_present(USB_US_TYPE_UB);
}

module_init(ub_init);
module_exit(ub_exit);

MODULE_LICENSE("GPL");