
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                                            +--------+      !
 *         was -EPIPE -->-------------------------------->! CLEAR  !      !
 *           !                                            +--------+      !
 *           !                                                !           !
 *         was error -->------------------------------------- ! --------->\
 *           !                                                !           !
 *  /--<-- cmd->dir == NONE ?                                 !           !
 *  !        !                                                !           !
 *  !        V                                                !           !
 *  !    +--------+                                           !           !
 *  !    ! DATA   !                                           !           !
 *  !    +--------+                                           !           !
 *  !        !                           +---------+          !           !
 *  !      was -EPIPE -->--------------->! CLR2STS !          !           !
 *  !        !                           +---------+          !           !
 *  !        !                                !               !           !
 *  !        !                              was error -->---- ! --------->\
 *  !      was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !               !           !
 *  !        V                                !               !           !
 *  \--->+--------+                           !               !           !
 *       ! STAT   !<--------------------------/               !           !
 *  /--->+--------+                                           !           !
 *  !        !                                                !           !
 * [C]     was -EPIPE -->-----------\                         !           !
 *  !        !                      !                         !           !
 *  +<---- len == 0                 !                         !           !
 *  !        !                      !                         !           !
 *  !      was error -->--------------------------------------!---------->\
 *  !        !                      !                         !           !
 *  +<---- bad CSW                  !                         !           !
 *  +<---- bad tag                  !                         !           !
 *  !        !                      V                         !           !
 *  !        !                 +--------+                     !           !
 *  !        !                 ! CLRRS  !                     !           !
 *  !        !                 +--------+                     !           !
 *  !        !                      !                         !           !
 *  \------- ! --------------------[C]--------\               !           !
 *           !                                !               !           !
 *         cmd->error---\                +--------+           !           !
 *           !          +--------------->! SENSE  !<----------/           !
 *         STAT_FAIL----/                +--------+                       !
 *           !                                !                           V
 *           !                                V                      +--------+
 *           \--------------------------------\--------------------->! DONE   !
 *                                                                   +--------+
 */
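
/*
 * Example walk of the diagram: a successful READ passes INIT -> CMD ->
 * DATA -> STAT -> DONE.  A CSW that arrives with a zero length, a bad
 * signature, or a bad tag re-enters STAT along the [C] path (see
 * ub_state_stat_counted below); after four such attempts the command is
 * diverted to SENSE instead.
 */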

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_PARTS_PER_LUN      8

#define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 7 */
	u8	Lun;			/* LUN */
	u8	Length;			/* length of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0
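
/*
 * Worked example (illustrative, not from the spec text): a READ(10) of
 * eight 512-byte sectors is wrapped as Signature = US_BULK_CB_SIGN,
 * Tag = the current sc->tagcnt, DataTransferLength = cpu_to_le32(4096),
 * Flags = 0x80 (device-to-host, as set in ub_scsi_cmd_start), Lun and
 * Length (10) in the next two bytes, then the CDB padded to 16 bytes.
 * The wrapper always goes out as US_BULK_CB_WRAP_LEN (31) bytes.
 */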

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;		/* should = 'USBS' */
	u32	Tag;			/* same as original command */
	__le32	Residue;		/* amount not transferred */
	u8	Status;			/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2
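
/*
 * Example (illustrative): a clean completion of the 4096-byte read above
 * is a 13-byte CSW with Signature 'USBS', the same Tag echoed back,
 * Residue 0 and Status US_BULK_STAT_OK; a device that moved only 512
 * bytes would report Residue = cpu_to_le32(3584) instead.
 */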

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG	9	/* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	enum ub_scsi_cmd_state state;
	unsigned int tag;
	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status. */
	unsigned int timeo;		/* jiffies until rq->timeout changes */

	unsigned int len;		/* Requested length */
	unsigned int current_sg;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	struct ub_lun *lun;
	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
	void *back;
};

struct ub_request {
	struct request *rq;
	unsigned int current_try;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
	unsigned long nsec;		/* Linux size - 512 byte sectors */
	unsigned int bsize;		/* Linux hardsect_size */
	unsigned int bshift;		/* Shift between 512 and hard sects */
};
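
/*
 * Example (illustrative): a CD-ROM with 262144 hardware sectors of 2048
 * bytes is described as bsize = 2048, bshift = 2 and
 * nsec = 262144 << 2 = 1048576 Linux 512-byte sectors (512 MB).
 */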

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell it takes a struct instead of a pointer to a struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
	unsigned int done;
	spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)	((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&x->lock, flags);
	ret = x->done;
	spin_unlock_irqrestore(&x->lock, flags);
	return ret;
}
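
/*
 * Usage sketch: the work URB path arms this with UB_INIT_COMPLETION()
 * before usb_submit_urb(), the URB callback calls ub_complete(), and
 * ub_scsi_dispatch() polls ub_is_completed() from the tasklet instead of
 * sleeping, which is why this can be used under sc->lock.
 */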

/*
 */
struct ub_scsi_cmd_queue {
	int qlen, qmax;
	struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
	struct ub_dev *udev;
	struct list_head link;
	struct gendisk *disk;
	int id;				/* Host index */
	int num;			/* LUN number */
	char name[16];

	int changed;			/* Media was changed */
	int removable;
	int readonly;

	struct ub_request urq;

	/* Use Ingo's mempool if or when we have more than one command. */
	/*
	 * Currently we never need more than one command for the whole device.
	 * However, giving every LUN a command is a cheap and automatic way
	 * to enforce fairness between them.
	 */
	int cmda[1];
	struct ub_scsi_cmd cmdv[1];

	struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
	spinlock_t *lock;
	atomic_t poison;		/* The USB device is disconnected */
	int openc;			/* protected by ub_lock! */
					/* kref is too implicit for our taste */
	int reset;			/* Reset is running */
	int bad_resid;
	unsigned int tagcnt;
	char name[12];
	struct usb_device *dev;
	struct usb_interface *intf;

	struct list_head luns;

	unsigned int send_bulk_pipe;	/* cached pipe values */
	unsigned int recv_bulk_pipe;
	unsigned int send_ctrl_pipe;
	unsigned int recv_ctrl_pipe;

	struct tasklet_struct tasklet;

	struct ub_scsi_cmd_queue cmd_queue;
	struct ub_scsi_cmd top_rqs_cmd;	/* REQUEST SENSE */
	unsigned char top_sense[UB_SENSE_SIZE];

	struct ub_completion work_done;
	struct urb work_urb;
	struct timer_list work_timer;
	int last_pipe;			/* What might need clearing */
	__le32 signature;		/* Learned signature */
	struct bulk_cb_wrap work_bcb;
	struct bulk_cs_wrap work_bcs;
	struct usb_ctrlrequest work_cr;

	struct work_struct reset_work;
	wait_queue_head_t reset_wait;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static const struct usb_device_id ub_usb_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
	{ }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	for (i = 0; i < UB_MAX_HOSTS; i++) {
		if (ub_hostv[i] == 0) {
			ub_hostv[i] = 1;
			spin_unlock_irqrestore(&ub_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&ub_lock, flags);
	return -1;
}

static void ub_id_put(int id)
{
	unsigned long flags;

	if (id < 0 || id >= UB_MAX_HOSTS) {
		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
		return;
	}

	spin_lock_irqsave(&ub_lock, flags);
	if (ub_hostv[id] == 0) {
		spin_unlock_irqrestore(&ub_lock, flags);
		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
		return;
	}
	ub_hostv[id] = 0;
	spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
	unsigned long flags;
	spinlock_t *ret;

	spin_lock_irqsave(&ub_lock, flags);
	ret = &ub_qlockv[ub_qlock_next];
	ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
	spin_unlock_irqrestore(&ub_lock, flags);
	return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		ub_cleanup(sc);
	} else {
		spin_unlock_irqrestore(&ub_lock, flags);
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	struct request_queue *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
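
/*
 * Usage note: the queue is a plain FIFO for regular commands; the one
 * head insertion, ub_cmdq_insert(), is used by ub_state_sense() to run
 * an auto REQUEST SENSE ahead of the command that triggered it.
 */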

/*
 * The request function is our main entry point
 */

static void ub_request_fn(struct request_queue *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	if (atomic_read(&sc->poison)) {
		blk_start_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		blk_start_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blk_start_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * get scatterlist from block layer
	 */
	sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * build the command
	 *
	 * The call to blk_queue_logical_block_size() guarantees that the
	 * request is aligned, but it is always given in 512-byte units.
	 */
	block = blk_rq_pos(rq) >> lun->capacity.bshift;
	nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = blk_rq_bytes(rq);
}
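
/*
 * Worked example (illustrative): on a medium with bshift = 2 (2048-byte
 * sectors), a request at 512-byte sector 100 for 8 sectors yields
 * block = 100 >> 2 = 25 and nblks = 8 >> 2 = 2, stored big-endian in
 * CDB bytes 2-5 and 7-8 of the READ(10)/WRITE(10).
 */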

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (blk_rq_bytes(rq) == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = blk_rq_bytes(rq);

	/*
	 * To reapply this to every URB is not as incorrect as it looks.
	 * In return, we avoid any complicated tracking calculations.
	 */
	cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			if (cmd->act_len >= rq->resid_len)
				rq->resid_len = 0;
			else
				rq->resid_len -= cmd->act_len;
			scsi_status = 0;
		} else {
			if (cmd->act_len != cmd->len) {
				scsi_status = SAM_STAT_CHECK_CONDITION;
			} else {
				scsi_status = 0;
			}
		}
	} else {
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			if (cmd->error == -EIO &&
			    (cmd->key == 0 ||
			     cmd->key == MEDIUM_ERROR ||
			     cmd->key == UNIT_ATTENTION)) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
	int error;

	if (scsi_status == 0) {
		error = 0;
	} else {
		error = -EIO;
		rq->errors = scsi_status;
	}
	__blk_end_request_all(rq, error);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

	if (atomic_read(&sc->poison))
		return -ENXIO;

	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

	ub_cmdq_add(sc, cmd);
	return 0;
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero the sense buffer for all commands so that a failed packet
	 * request does not return stale sense data.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;
			del_timer(&sc->work_timer);
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int endp;
	int len;
	int rc;

	if (atomic_read(&sc->poison)) {
		ub_state_done(sc, cmd, -ENODEV);
		return;
	}

	endp = usb_pipeendpoint(sc->last_pipe);
	if (usb_pipein(sc->last_pipe))
		endp |= USB_DIR_IN;

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLRRS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_stat_counted(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		switch (urb->status) {
		case 0:
			break;
		case -EOVERFLOW:
			goto Bad_End;
		case -EPIPE:
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLEAR;
			return;
		case -ESHUTDOWN:	/* unplug */
		case -EILSEQ:		/* unplug timeout on uhci */
			ub_state_done(sc, cmd, -ENODEV);
			return;
		default:
			goto Bad_End;
		}
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			goto Bad_End;
		}

		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
			ub_state_stat(sc, cmd);
			return;
		}

		// udelay(125);		// usb-storage has this
		ub_data_start(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLR2STS;
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * A babble? Failure, but we must transfer CSW now.
			 */
			cmd->error = -EOVERFLOW;	/* A cheap trick... */
			ub_state_stat(sc, cmd);
			return;
		}

		if (cmd->dir == UB_DIR_WRITE) {
			/*
			 * Do not continue writes in case of a failure.
			 * Doing so would cause sectors to be mixed up,
			 * which is worse than sectors lost.
			 *
			 * We must try to read the CSW, or many devices
			 * get confused.
			 */
			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				cmd->act_len += len;

				cmd->error = -EIO;
				ub_state_stat(sc, cmd);
				return;
			}

		} else {
			/*
			 * If an error occurs on read, we record it and
			 * continue to fetch data in order to avoid a bubble.
			 *
			 * As a small shortcut, we stop if we detect that
			 * a CSW was mixed into the data.
			 */
			if (urb->status != 0)
				cmd->error = -EIO;

			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
					goto Bad_End;
			}
		}

		cmd->act_len += urb->actual_length;

		if (++cmd->current_sg < cmd->nsg) {
			ub_data_start(sc, cmd);
			return;
		}
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}

			/*
			 * Having a stall when getting CSW is an error, so
			 * make sure upper levels are not oblivious to it.
			 */
			cmd->error = -EIO;		/* A cheap trick... */

			cmd->state = UB_CMDST_CLRRS;
			return;
		}

		/* Catch everything, including -EOVERFLOW and other nasties. */
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			ub_state_stat_counted(sc, cmd);
			return;
		}

		/*
		 * Check the returned Bulk protocol status.
		 * The status block has to be validated first.
		 */

		bcs = &sc->work_bcs;

		if (sc->signature == cpu_to_le32(0)) {
			/*
			 * This is the first reply, so do not perform the check.
			 * Instead, remember the signature the device uses
			 * for future checks. But do not allow a null signature.
			 */
			sc->signature = bcs->Signature;
			if (sc->signature == cpu_to_le32(0)) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		} else {
			if (bcs->Signature != sc->signature) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		}

		if (bcs->Tag != cmd->tag) {
			/*
			 * This usually happens when we disagree with the
			 * device's microcode about something. For instance,
			 * a few of them throw this after timeouts. They buffer
			 * commands and reply to commands that we timed out
			 * earlier. Without flushing these replies we loop
			 * forever.
			 */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		if (!sc->bad_resid) {
			len = le32_to_cpu(bcs->Residue);
			if (len != cmd->len - cmd->act_len) {
				/*
				 * Only start ignoring if this cmd ended well.
				 */
				if (cmd->len == cmd->act_len) {
					printk(KERN_NOTICE "%s: "
					    "bad residual %d of %d, ignoring\n",
					    sc->name, len, cmd->len);
					sc->bad_resid = 1;
				}
			}
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);
			return;
		case US_BULK_STAT_PHASE:
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			ub_state_done(sc, cmd, -EINVAL);
			return;
		}

		/* Not zeroing error to preserve a babble indicator */
		if (cmd->error != 0) {
			ub_state_sense(sc, cmd);
			return;
		}
		cmd->state = UB_CMDST_DONE;
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: wrong command state %d\n",
		    sc->name, cmd->state);
		ub_state_done(sc, cmd, -EINVAL);
		return;
	}
	return;

Bad_End: /* Little Excel is dead */
	ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
	int pipe;
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	if (cmd->dir == UB_DIR_READ)
		pipe = sc->recv_bulk_pipe;
	else
		pipe = sc->send_bulk_pipe;
	sc->last_pipe = pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
	    sg->length, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	if (cmd->timeo)
		sc->work_timer.expires = jiffies + cmd->timeo;
	else
		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

	cmd->error = rc;
	cmd->state = UB_CMDST_DONE;
	ub_cmdq_pop(sc);
	(*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->recv_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return -1;
	}

	if (cmd->timeo)
		sc->work_timer.expires = jiffies + cmd->timeo;
	else
		sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->stat_count = 0;
	cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (++cmd->stat_count >= 4) {
		ub_state_sense(sc, cmd);
		return;
	}

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd *scmd;
	struct scatterlist *sg;
	int rc;

	if (cmd->cdb[0] == REQUEST_SENSE) {
		rc = -EPIPE;
		goto error;
	}

	scmd = &sc->top_rqs_cmd;
	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
	scmd->cdb[0] = REQUEST_SENSE;
	scmd->cdb[4] = UB_SENSE_SIZE;
	scmd->cdb_len = 6;
	scmd->dir = UB_DIR_READ;
	scmd->state = UB_CMDST_INIT;
	scmd->nsg = 1;
	sg = &scmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
			(unsigned long)sc->top_sense & (PAGE_SIZE-1));
	scmd->len = UB_SENSE_SIZE;
	scmd->lun = cmd->lun;
	scmd->done = ub_top_sense_done;
	scmd->back = cmd;

	scmd->tag = sc->tagcnt++;

	cmd->state = UB_CMDST_SENSE;

	ub_cmdq_insert(sc, scmd);
	return;

error:
	ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	int rc;

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein(stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	UB_INIT_COMPLETION(sc->work_done);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}

/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = sc->top_sense;
	struct ub_scsi_cmd *cmd;

	/*
	 * Find the command which triggered the unit attention or a check,
	 * save the sense into it, and advance its state machine.
	 */
	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x\n",
		    sc->name, cmd->tag);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
		    sc->name, cmd->state);
		return;
	}

	/*
	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
	 */
	cmd->key = sense[2] & 0x0F;
	cmd->asc = sense[12];
	cmd->ascq = sense[13];

	ub_scsi_urb_compl(sc, cmd);
}

/*
 * Reset management
 */

static void ub_reset_enter(struct ub_dev *sc, int try)
{

	if (sc->reset) {
		/* This happens often on multi-LUN devices. */
		return;
	}
	sc->reset = try + 1;

	schedule_work(&sc->reset_work);
}
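
/*
 * Note on the encoding: sc->reset holds try + 1, and ub_reset_task keys
 * off its low bit: even values take the Bulk-only reset path
 * (ub_sync_reset plus clearing both bulk pipes), odd values attempt a
 * full usb_reset_device, which is only done when the device has exactly
 * one interface.
 */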

static void ub_reset_task(struct work_struct *work)
{
	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
	unsigned long flags;
	struct ub_lun *lun;
	int rc;

	if (!sc->reset) {
		printk(KERN_WARNING "%s: Running reset unrequested\n",
		    sc->name);
		return;
	}

	if (atomic_read(&sc->poison)) {
		;
	} else if ((sc->reset & 1) == 0) {
		ub_sync_reset(sc);
		msleep(700);	/* usb-storage sleeps 6s (!) */
		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
		;
	} else {
		rc = usb_lock_device_for_reset(sc->dev, sc->intf);
		if (rc < 0) {
			printk(KERN_NOTICE
			    "%s: usb_lock_device_for_reset failed (%d)\n",
			    sc->name, rc);
		} else {
			rc = usb_reset_device(sc->dev);
			if (rc < 0) {
				printk(KERN_NOTICE "%s: "
				    "usb_reset_device failed (%d)\n",
				    sc->name, rc);
			}
			usb_unlock_device(sc->dev);
		}
	}

	/*
	 * In theory, no commands can be running while reset is active,
	 * so nobody can ask for another reset, and so we do not need any
	 * queues of resets or anything. We do need a spinlock though,
	 * to interact with the block layer.
	 */
	spin_lock_irqsave(sc->lock, flags);
	sc->reset = 0;
	tasklet_schedule(&sc->tasklet);
	list_for_each_entry(lun, &sc->luns, link) {
		blk_start_queue(lun->disk->queue);
	}
	wake_up(&sc->reset_wait);
	spin_unlock_irqrestore(sc->lock, flags);
}

static int ub_pre_reset(struct usb_interface *iface)
{
	return 0;
}

static int ub_post_reset(struct usb_interface *iface)
{
	return 0;
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct ub_lun *lun = bdev->bd_disk->private_data;
	struct ub_dev *sc = lun->udev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(bdev);

	/*
	 * sd.c considers ->media_present and ->changed to be non-equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need that distinction one day.
	 */
	if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();
	ret = ub_bd_open(bdev, mode);
	unlock_kernel();

	return ret;
}

/*
 */
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
{
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	lock_kernel();
	ub_put(sc);
	unlock_kernel();

	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	void __user *usermem = (void __user *) arg;
	int ret;

	lock_kernel();
	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
	unlock_kernel();

	return ret;
}

/*
 * This is called by check_disk_change if we reported a media change.
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We always clear the check condition after every command, so this
	 * is not as dangerous as it looks. If the TEST_UNIT_READY fails
	 * here, the device is genuinely not ready, and operator or software
	 * intervention is required. One dangerous item might be a drive
	 * which spins itself down; come the time to write dirty pages, the
	 * write will fail and the block layer discards the data. Since we
	 * never spin drives up, such devices simply cannot be used with ub
	 * anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static const struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_unlocked_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* This may be NULL, but that's ok */
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	rc = cmd->error;

	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 * Read the SCSI capacity synchronously (for probing).
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);

	cmd->cdb[0] = 0x25;
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;
	sg = &cmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		rc = -EIO;
		goto err_read;
	}

	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
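
/*
 * Worked example (illustrative): READ CAPACITY(10) returns the last LBA
 * and the block size, both big-endian.  A reply of 0x0003FFFF, 0x00000800
 * gives nsec = 0x3FFFF + 1 = 262144 hardware sectors of 2048 bytes,
 * reported upward as 262144 << 2 = 1048576 512-byte sectors.
 */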
1911
1912/*
1913 */
1914static void ub_probe_urb_complete(struct urb *urb)
1915{
1916	struct completion *cop = urb->context;
1917	complete(cop);
1918}
1919
1920static void ub_probe_timeout(unsigned long arg)
1921{
1922	struct completion *cop = (struct completion *) arg;
1923	complete(cop);
1924}
1925
1926/*
1927 * Reset with a Bulk reset.
1928 */
1929static int ub_sync_reset(struct ub_dev *sc)
1930{
1931	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1932	struct usb_ctrlrequest *cr;
1933	struct completion compl;
1934	struct timer_list timer;
1935	int rc;
1936
1937	init_completion(&compl);
1938
1939	cr = &sc->work_cr;
1940	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1941	cr->bRequest = US_BULK_RESET_REQUEST;
1942	cr->wValue = cpu_to_le16(0);
1943	cr->wIndex = cpu_to_le16(ifnum);
1944	cr->wLength = cpu_to_le16(0);
1945
1946	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1947	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1948
1949	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1950		printk(KERN_WARNING
1951		     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1952		return rc;
1953	}
1954
1955	init_timer(&timer);
1956	timer.function = ub_probe_timeout;
1957	timer.data = (unsigned long) &compl;
1958	timer.expires = jiffies + UB_CTRL_TIMEOUT;
1959	add_timer(&timer);
1960
1961	wait_for_completion(&compl);
1962
1963	del_timer_sync(&timer);
1964	usb_kill_urb(&sc->work_urb);
1965
1966	return sc->work_urb.status;
1967}

/*
 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned char *p;
	enum { ALLOC_SIZE = 1 };
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int nluns;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	*p = 55;	/* Preset to an impossible reply value, see below */

	cr = &sc->work_cr;
	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_GET_MAX_LUN;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(1);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
		goto err_submit;

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	if ((rc = sc->work_urb.status) < 0)
		goto err_io;

	if (sc->work_urb.actual_length != 1) {
		nluns = 0;
	} else {
		if ((nluns = *p) == 55) {
			nluns = 0;
		} else {
			/* GetMaxLUN returns the maximum LUN number */
			nluns += 1;
			if (nluns > UB_MAX_LUNS)
				nluns = UB_MAX_LUNS;
		}
	}

	kfree(p);
	return nluns;

err_io:
err_submit:
	kfree(p);
err_alloc:
	return rc;
}
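
/*
 * For reference, the shape of Get Max LUN on the wire (per the USB Mass
 * Storage Bulk-Only Transport spec): bmRequestType 0xA1 (IN, class,
 * interface), bRequest 0xFE (US_BULK_GET_MAX_LUN), wValue 0, wIndex =
 * interface number, wLength 1. A device with n+1 LUNs replies with n;
 * single-LUN devices often STALL instead, which is why a short or
 * missing reply is folded to "unknown" (0) above.
 */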

/*
 * Clear initial stalls.
 */
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein(stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	usb_reset_endpoint(sc->dev, endp);

	return 0;
}
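
/*
 * The USB core offers usb_clear_halt() for exactly this CLEAR_FEATURE
 * (ENDPOINT_HALT) request; a sketch of the equivalence is below. We roll
 * our own URB here, presumably to reuse work_cr/work_urb and to bound
 * the request with our own timer rather than the core's fixed control
 * timeout.
 */
#if 0	/* illustrative only */
	rc = usb_clear_halt(sc->dev, stalled_pipe);
#endif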

/*
 * Get the pipe settings.
 */
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
    struct usb_interface *intf)
{
	struct usb_host_interface *altsetting = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL;
	struct usb_endpoint_descriptor *ep_out = NULL;
	struct usb_endpoint_descriptor *ep;
	int i;

	/*
	 * Find the endpoints we need.
	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
	 * We will ignore any others.
	 */
	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
		ep = &altsetting->endpoint[i].desc;

		/* Is it a BULK endpoint? */
		if (usb_endpoint_xfer_bulk(ep)) {
			/* BULK in or out? */
			if (usb_endpoint_dir_in(ep)) {
				if (ep_in == NULL)
					ep_in = ep;
			} else {
				if (ep_out == NULL)
					ep_out = ep;
			}
		}
	}

	if (ep_in == NULL || ep_out == NULL) {
		printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
		return -ENODEV;
	}

	/* Calculate and store the pipe values */
	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
		usb_endpoint_num(ep_out));
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
		usb_endpoint_num(ep_in));

	return 0;
}
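
/*
 * Aside: kernels from v4.12 onward carry a generic helper that performs
 * the same scan, with which ub_get_pipes() could shrink to roughly the
 * sketch below. Shown for orientation only; the helper does not exist
 * in the 2.6 tree this driver lives in.
 */
#if 0	/* illustrative only; needs usb_find_common_endpoints() (v4.12+) */
	if (usb_find_common_endpoints(intf->cur_altsetting,
	    &ep_in, &ep_out, NULL, NULL) < 0) {
		printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
		return -ENODEV;
	}
#endif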

/*
 * Probing is done in the process context, which allows us to cheat
 * and not build a state machine for the discovery.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
		return -ENXIO;

	rc = -ENOMEM;
	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	sc->lock = ub_next_lock();
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);
	INIT_WORK(&sc->reset_work, ub_reset_task);
	init_waitqueue_head(&sc->reset_wait);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;		/* A little yuk, but oh well... */

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;
	/* sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; */
	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);
	/*
	 * Since we give the interface struct to the block level through
	 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
	 * oopses on close after a disconnect (kernels 2.6.16 and up).
	 */
	usb_get_intf(sc->intf);

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	if (ub_get_pipes(sc, sc->dev, intf) != 0)
		goto err_dev_desc;

	/*
	 * At this point, all USB initialization is done, do upper layer.
	 * We really hate halfway initialized structures, so from the
	 * invariants perspective, this ub_dev is fully constructed at
	 * this point.
	 */

	/*
	 * This is needed to clear toggles. It is a problem only if we do
	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
	 */

	/*
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check causes a USB stall. Our common case code sees it
	 * and clears the check, after which the device is ready for use.
	 * But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command fails
	 * (which is the capacity readout). We clear that and retry, but why
	 * cause spurious retries for no reason?
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
	 */
	for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
		if ((rc = ub_sync_tur(sc, NULL)) <= 0)
			break;
		if (rc != 0x6)		/* 0x6 = sense key UNIT ATTENTION */
			break;
		msleep(10);
	}

	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0)
			break;
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);
	}
	return 0;

err_dev_desc:
	usb_set_intfdata(intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}

static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	struct request_queue *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	lun->removable = 1;
	lun->changed = 1;		/* only ub_revalidate clears this */
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
		goto err_diskalloc;

	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
	blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
	blk_queue_logical_block_size(q, lun->capacity.bsize);

	lun->disk = disk;
	q->queuedata = lun;
	list_add(&lun->link, &sc->luns);

	set_capacity(disk, lun->capacity.nsec);	/* in 512-byte sectors */
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}

static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct ub_lun *lun;
	unsigned long flags;

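	/*
	 * Pin sc so it cannot be freed while we tear everything down;
	 * the matching release is the ub_put() at the end of this function.
	 */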
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Fence stall clearings, operations triggered by unlinkings and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon
	 * disconnect anyway.
	 */
	atomic_set(&sc->poison, 1);

	/*
	 * Wait for reset to end, if any.
	 */
	wait_event(sc->reset_wait, !sc->reset);

	/*
	 * Blow away queued commands.
	 *
	 * Actually, this never works, because before we get here
	 * the HCD terminates outstanding URB(s). It causes our
	 * SCSI command queue to advance, commands fail to submit,
	 * and the whole queue drains. So, we just use this code to
	 * print warnings.
	 */
	spin_lock_irqsave(sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d commands were queued after shutdown\n",
			    sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * Unregister the upper layer.
	 */
	list_for_each_entry(lun, &sc->luns, link) {
		del_gendisk(lun->disk);
		/*
		 * I wish I could do:
		 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
		 * As it is, we rely on our internal poisoning and let
		 * the upper levels spin furiously, failing all the I/O.
		 */
	}

	/*
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
	 */
	spin_lock_irqsave(sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * There is virtually no chance that another CPU runs a timeout this
	 * long after ub_urb_complete should have called del_timer, but only
	 * if the HCD didn't forget to deliver a callback on unlink.
	 */
	del_timer_sync(&sc->work_timer);

	/*
	 * At this point there must be no commands coming from anyone
	 * and no URBs left in transit.
	 */

	ub_put(sc);
}

static struct usb_driver ub_driver = {
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
	.pre_reset =	ub_pre_reset,
	.post_reset =	ub_post_reset,
};

static int __init ub_init(void)
{
	int rc;
	int i;

	for (i = 0; i < UB_QLOCK_NUM; i++)
		spin_lock_init(&ub_qlockv[i]);

	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
		goto err_regblkdev;

	if ((rc = usb_register(&ub_driver)) != 0)
		goto err_register;

	usb_usual_set_present(USB_US_TYPE_UB);
	return 0;

err_register:
	unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
	return rc;
}

static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	unregister_blkdev(UB_MAJOR, DRV_NAME);
	usb_usual_clear_present(USB_US_TYPE_UB);
}

module_init(ub_init);
module_exit(ub_exit);

MODULE_LICENSE("GPL");