/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30534;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.34"

/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20061027";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
 * When using 32-bit integers, x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same result but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
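/*
 * Worked example (illustrative values, assuming HZ=1000 and USER_HZ=100):
 * the naive MULDIV(30000000, 1000, 100) = 30000000 * 1000 / 100 overflows
 * a 32-bit int at the multiply, while the rearranged form
 *   ((30000000 % 100) * 1000) / 100 + (30000000 / 100) * 1000 = 0 + 300000000
 * stays in range, since (X % DIV) * MUL is bounded by (DIV - 1) * MUL.
 */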

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)

int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
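/*
 * Illustrative usage (the 131072 value is hypothetical): a privileged
 * process can raise the default reserved-buffer size used by subsequently
 * opened file descriptors with
 *     echo 131072 > /proc/scsi/sg/def_reserved_size
 * and read the current default back with
 *     cat /proc/scsi/sg/def_reserved_size
 */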
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512

static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock file
					   descriptor list for device */

static struct class_interface sg_interface = {
	.add_dev	= sg_add,
	.remove_dev	= sg_remove,
};

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	struct page **pages;
	int page_order;
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
	struct request *rq;
	struct bio *bio;
	struct execute_work ew;
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct list_head sfd_siblings;
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
	struct kref f_ref;
	struct execute_work ew;
} Sg_fd;

typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	u32 index;		/* device index number */
	struct list_head sfds;
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
	struct kref d_ref;
} Sg_device;

/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
			const char __user *buf, size_t count, int blocking,
			int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_put_dev(Sg_device *sdp);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
	struct sg_fd *sfp = (struct sg_fd *)filp->private_data;

	if (sfp->parentdp->device->type == TYPE_SCANNER)
		return 0;

	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}

static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;
	Sg_device *sdp;
	Sg_fd *sfp;
	int res;
	int retval;

	lock_kernel();
	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if (IS_ERR(sdp)) {
		retval = PTR_ERR(sdp);
		sdp = NULL;
		goto sg_put;
	}

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		goto sg_put;

	retval = scsi_autopm_get_device(sdp->device);
	if (retval)
		goto sdp_put;

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM; /* Can't lock it with read only access */
			goto error_out;
		}
		if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait,
					   ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	}
	if (sdp->detached) {
		retval = -ENODEV;
		goto error_out;
	}
	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
		sdp->sgdebug = 0;
		q = sdp->device->request_queue;
		sdp->sg_tablesize = queue_max_segments(q);
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
	else {
		if (flags & O_EXCL) {
			sdp->exclude = 0;	/* undo if error */
			wake_up_interruptible(&sdp->o_excl_wait);
		}
		retval = -ENOMEM;
		goto error_out;
	}
	retval = 0;
error_out:
	if (retval) {
		scsi_autopm_put_device(sdp->device);
sdp_put:
		scsi_device_put(sdp->device);
	}
sg_put:
	if (sdp)
		sg_put_dev(sdp);
	unlock_kernel();
	return retval;
}

/* Following function was formerly called 'sg_close' */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));

	sfp->closed = 1;

	sdp->exclude = 0;
	wake_up_interruptible(&sdp->o_excl_wait);

	scsi_autopm_put_device(sdp->device);
	kref_put(&sfp->f_ref, sg_remove_sfp);
	return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr = NULL;
	int retval = 0;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!old_hdr)
			return -ENOMEM;
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				if (!new_hdr) {
					retval = -ENOMEM;
					goto free_old_hdr;
				}
				retval = __copy_from_user(new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
				kfree(new_hdr);
				if (retval) {
					retval = -EFAULT;
					goto free_old_hdr;
				}
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (sdp->detached) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (filp->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto free_old_hdr;
		}
		while (1) {
			retval = 0; /* following macro beats race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached ||
				(srp = sg_get_rq_mark(sfp, req_pack_id))),
				retval);
			if (sdp->detached) {
				retval = -ENODEV;
				goto free_old_hdr;
			}
			if (0 == retval)
				break;

			/* -ERESTARTSYS as signal hit process */
			goto free_old_hdr;
		}
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
		goto free_old_hdr;
	}

	hp = &srp->header;
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (! old_hdr) {
			retval = -ENOMEM;
			goto free_old_hdr;
		}
	}
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_OK:
	case DID_PASSTHROUGH:
	case DID_SOFT_ERROR:
		old_hdr->result = 0;
		break;
	case DID_NO_CONNECT:
	case DID_BUS_BUSY:
	case DID_TIME_OUT:
		old_hdr->result = EBUSY;
		break;
	case DID_BAD_TARGET:
	case DID_ABORT:
	case DID_PARITY:
	case DID_RESET:
	case DID_BAD_INTR:
		old_hdr->result = EIO;
		break;
	case DID_ERROR:
		old_hdr->result = (srp->sense_b[0] == 0 &&
				  hp->masked_status == GOOD) ? 0 : EIO;
		break;
	default:
		old_hdr->result = EIO;
		break;
	}

	/* Now copy the result back to the user buffer.  */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		buf += SZ_SG_HEADER;
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
				retval = -EFAULT;
				goto free_old_hdr;
			}
		}
	} else
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	retval = count;
free_old_hdr:
	kfree(old_hdr);
	return retval;
}

static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int len;

	if (count < SZ_SG_IO_HDR) {
		err = -EINVAL;
		goto err_out;
	}
	hp->sb_len_wr = 0;
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = SCSI_SENSE_BUFFERSIZE;
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
		err = -EFAULT;
		goto err_out;
	}
err_out:
	err = sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (sdp->detached)
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count,
				    blocking, 0, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
			return -EIO;
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
		hp->dxferp = (char __user *)buf + cmd_size;
	else
		hp->dxferp = NULL;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because
	 * there is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		static char cmd[TASK_COMM_LEN];
		if (strcmp(current->comm, cmd) && printk_ratelimit()) {
			printk(KERN_WARNING
			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
			       "guessing data in;\n   "
			       "program %s not setting count and/or reply_len properly\n",
			       old_hdr.reply_len - (int)SZ_SG_HEADER,
			       input_size, (unsigned int) cmnd[0],
			       current->comm);
			strcpy(cmd, current->comm);
		}
	}
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}

static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
		 size_t count, int blocking, int read_only, int sg_io_owned,
		 Sg_request **o_srp)
{
	int k;
	Sg_request *srp;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];
	int timeout;
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT; /* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
		return -EDOM;
	}
	srp->sg_io_owned = sg_io_owned;
	hp = &srp->header;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
		return -ENOSYS;
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only && sg_allow_access(file, cmnd)) {
		sg_remove_request(sfp, srp);
		return -EPERM;
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
	if (k < 0)
		return k;
	if (o_srp)
		*o_srp = srp;
	return count;
}

static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k, data_dir;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));

	k = sg_start_req(srp, cmnd);
	if (k) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if (sdp->detached) {
		if (srp->bio)
			blk_end_request_all(srp->rq, -EIO);
		sg_finish_rem_req(srp);
		return -ENODEV;
	}

	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
		break;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
		break;
	default:
		data_dir = DMA_NONE;
		break;
	}
	hp->duration = jiffies_to_msecs(jiffies);

	srp->rq->timeout = timeout;
	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
			      srp->rq, 1, sg_rq_end_io);
	return 0;
}

static int
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
					 blocking, read_only, 1, &srp);
			if (result < 0)
				return result;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(srp->done || sdp->detached),
					result);
				if (sdp->detached)
					return -ENODEV;
				write_lock_irq(&sfp->rq_list_lock);
				if (srp->done) {
					srp->done = 2;
					write_unlock_irq(&sfp->rq_list_lock);
					break;
				}
				srp->orphan = 1;
				write_unlock_irq(&sfp->rq_list_lock);
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
		    val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		val = min_t(int, val,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = min_t(int, sfp->reserve.bufflen,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t *rinfo;
			unsigned int ms;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
								GFP_KERNEL);
			if (!rinfo)
				return -ENOMEM;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
					if (srp->done)
						rinfo[val].duration =
							srp->header.duration;
					else {
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					}
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
							srp->sg_io_owned;
					rinfo[val].pack_id =
							srp->header.pack_id;
					rinfo[val].usr_ptr =
							srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
			kfree(rinfo);
			return result;
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_TARGET:
			val = SCSI_TRY_RESET_TARGET;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (sg_allow_access(filp, &opcode))
				return -EPERM;
		}
		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	case BLKSECTGET:
		return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
				ip);
	case BLKTRACESETUP:
		return blk_trace_setup(sdp->device->request_queue,
				       sdp->disk->disk_name,
				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
				       NULL,
				       (char *)arg);
	case BLKTRACESTART:
		return blk_trace_startstop(sdp->device->request_queue, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}

static long
sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = sg_ioctl(filp, cmd_in, arg);
	unlock_kernel();

	return ret;
}

#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	sdev = sdp->device;
	if (sdev->host->hostt->compat_ioctl) {
		int ret;

		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

		return ret;
	}

	return -ENOIOCTLCMD;
}
#endif

static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}

static int
sg_fasync(int fd, struct file *filp, int mode)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	return fasync_helper(fd, filp, mode, &sfp->async_qp);
}

static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	Sg_fd *sfp;
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return VM_FAULT_SIGBUS;
	rsv_schp = &sfp->reserve;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rsv_schp->bufflen)
		return VM_FAULT_SIGBUS;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	sa = vma->vm_start;
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
		if (offset < len) {
			struct page *page = nth_page(rsv_schp->pages[k],
						     offset >> PAGE_SHIFT);
			get_page(page);	/* increment page count */
			vmf->page = page;
			return 0; /* success */
		}
		sa += len;
		offset -= len;
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sg_mmap_vm_ops = {
	.fault = sg_vma_fault,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	sa = vma->vm_start;
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
		sa += len;
	}

	sfp->mmap_called = 1;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}

static void sg_rq_end_io_usercontext(struct work_struct *work)
{
	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
	struct sg_fd *sfp = srp->parentfp;

	sg_finish_rem_req(srp);
	kref_put(&sfp->f_ref, sg_remove_sfp);
}

/*
 * This function is a "bottom half" handler that is called by the mid
 * level when a command is completed (or has failed).
 */
static void sg_rq_end_io(struct request *rq, int uptodate)
{
	struct sg_request *srp = rq->end_io_data;
	Sg_device *sdp;
	Sg_fd *sfp;
	unsigned long iflags;
	unsigned int ms;
	char *sense;
	int result, resid, done = 1;

	if (WARN_ON(srp->done != 0))
		return;

	sfp = srp->parentfp;
	if (WARN_ON(sfp == NULL))
		return;

	sdp = sfp->parentdp;
	if (unlikely(sdp->detached))
		printk(KERN_INFO "sg_rq_end_io: device detached\n");

	sense = rq->sense;
	result = rq->errors;
	resid = rq->resid_len;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
	if (0 != result) {
		struct scsi_sense_hdr sshdr;

		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	if (unlikely(srp->orphan)) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else
			done = 0;
	}
	srp->done = done;
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (likely(done)) {
		/* Now wake up any sg_read() that is waiting for this
		 * packet.
		 */
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		kref_put(&sfp->f_ref, sg_remove_sfp);
	} else {
		INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
		schedule_work(&srp->ew.work);
	}
}

static const struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.unlocked_ioctl = sg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;

static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	struct request_queue *q = scsidp->request_queue;
	Sg_device *sdp;
	unsigned long iflags;
	int error;
	u32 k;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
		printk(KERN_WARNING "idr expansion Sg_device failure\n");
		error = -ENOMEM;
		goto out;
	}

	write_lock_irqsave(&sg_index_lock, iflags);

	error = idr_get_new(&sg_index_idr, sdp, &k);
	if (error) {
		write_unlock_irqrestore(&sg_index_lock, iflags);
		printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
		       error);
		goto out;
	}

	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	INIT_LIST_HEAD(&sdp->sfds);
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = queue_max_segments(q);
	sdp->index = k;
	kref_init(&sdp->d_ref);

	write_unlock_irqrestore(&sg_index_lock, iflags);

	error = 0;
 out:
	if (error) {
		kfree(sdp);
		return ERR_PTR(error);
	}
	return sdp;

 overflow:
	idr_remove(&sg_index_idr, k);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}

static int
sg_add(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	int error;
	unsigned long iflags;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	sdp = sg_alloc(disk, scsidp);
	if (IS_ERR(sdp)) {
		printk(KERN_WARNING "sg_alloc failed\n");
		error = PTR_ERR(sdp);
		goto out;
	}

	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
	if (error)
		goto cdev_add_err;

	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct device *sg_class_member;

		sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
						MKDEV(SCSI_GENERIC_MAJOR,
						      sdp->index),
						sdp, "%s", disk->disk_name);
		if (IS_ERR(sg_class_member)) {
			printk(KERN_ERR "sg_add: "
			       "device_create failed\n");
			error = PTR_ERR(sg_class_member);
			goto cdev_add_err;
		}
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
					"'generic' back to sg%d\n", sdp->index);
	} else
		printk(KERN_WARNING "sg_add: sg_sys Invalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", sdp->index,
		    scsidp->type);

	dev_set_drvdata(cl_dev, sdp);

	return 0;

cdev_add_err:
	write_lock_irqsave(&sg_index_lock, iflags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	kfree(sdp);

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}

static void sg_device_destroy(struct kref *kref)
{
	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
	unsigned long flags;

	/* CAUTION!  Note that the device can still be found via idr_find()
	 * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
	 * any other cleanup.
	 */

	write_lock_irqsave(&sg_index_lock, flags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, flags);

	SCSI_LOG_TIMEOUT(3,
		printk("sg_device_destroy: %s\n",
			sdp->disk->disk_name));

	put_disk(sdp->disk);
	kfree(sdp);
}

static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	Sg_device *sdp = dev_get_drvdata(cl_dev);
	unsigned long iflags;
	Sg_fd *sfp;

	if (!sdp || sdp->detached)
		return;

	SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));

	/* Need a write lock to set sdp->detached. */
	write_lock_irqsave(&sg_index_lock, iflags);
	sdp->detached = 1;
	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
	}
	write_unlock_irqrestore(&sg_index_lock, iflags);

	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
	cdev_del(sdp->cdev);
	sdp->cdev = NULL;

	sg_put_dev(sdp);
}

module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
		   S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");

static int __init
init_sg(void)
{
	int rc;

	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	}
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
	else
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}

static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	idr_destroy(&sg_index_idr);
}

1622static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1623{
1624	int res;
1625	struct request *rq;
1626	Sg_fd *sfp = srp->parentfp;
1627	sg_io_hdr_t *hp = &srp->header;
1628	int dxfer_len = (int) hp->dxfer_len;
1629	int dxfer_dir = hp->dxfer_direction;
1630	unsigned int iov_count = hp->iovec_count;
1631	Sg_scatter_hold *req_schp = &srp->data;
1632	Sg_scatter_hold *rsv_schp = &sfp->reserve;
1633	struct request_queue *q = sfp->parentdp->device->request_queue;
1634	struct rq_map_data *md, map_data;
1635	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1636
1637	SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1638				   dxfer_len));
1639
1640	rq = blk_get_request(q, rw, GFP_ATOMIC);
1641	if (!rq)
1642		return -ENOMEM;
1643
1644	memcpy(rq->cmd, cmd, hp->cmd_len);
1645
1646	rq->cmd_len = hp->cmd_len;
1647	rq->cmd_type = REQ_TYPE_BLOCK_PC;
1648
1649	srp->rq = rq;
1650	rq->end_io_data = srp;
1651	rq->sense = srp->sense_b;
1652	rq->retries = SG_DEFAULT_RETRIES;
1653
1654	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1655		return 0;
1656
1657	if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1658	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1659	    !sfp->parentdp->device->host->unchecked_isa_dma &&
1660	    blk_rq_aligned(q, hp->dxferp, dxfer_len))
1661		md = NULL;
1662	else
1663		md = &map_data;
1664
1665	if (md) {
1666		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1667			sg_link_reserve(sfp, srp, dxfer_len);
1668		else {
1669			res = sg_build_indirect(req_schp, sfp, dxfer_len);
1670			if (res)
1671				return res;
1672		}
1673
1674		md->pages = req_schp->pages;
1675		md->page_order = req_schp->page_order;
1676		md->nr_entries = req_schp->k_use_sg;
1677		md->offset = 0;
1678		md->null_mapped = hp->dxferp ? 0 : 1;
1679		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1680			md->from_user = 1;
1681		else
1682			md->from_user = 0;
1683	}
1684
1685	if (iov_count) {
1686		int len, size = sizeof(struct sg_iovec) * iov_count;
1687		struct iovec *iov;
1688
1689		iov = memdup_user(hp->dxferp, size);
1690		if (IS_ERR(iov))
1691			return PTR_ERR(iov);
1692
1693		len = iov_length(iov, iov_count);
1694		if (hp->dxfer_len < len) {
1695			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
1696			len = hp->dxfer_len;
1697		}
1698
1699		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
1700					  iov_count,
1701					  len, GFP_ATOMIC);
1702		kfree(iov);
1703	} else
1704		res = blk_rq_map_user(q, rq, md, hp->dxferp,
1705				      hp->dxfer_len, GFP_ATOMIC);
1706
1707	if (!res) {
1708		srp->bio = rq->bio;
1709
1710		if (!md) {
1711			req_schp->dio_in_use = 1;
1712			hp->info |= SG_INFO_DIRECT_IO;
1713		}
1714	}
1715	return res;
1716}
1717
1718static int sg_finish_rem_req(Sg_request * srp)
1719{
1720	int ret = 0;
1721
1722	Sg_fd *sfp = srp->parentfp;
1723	Sg_scatter_hold *req_schp = &srp->data;
1724
1725	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1726	if (srp->rq) {
1727		if (srp->bio)
1728			ret = blk_rq_unmap_user(srp->bio);
1729
1730		blk_put_request(srp->rq);
1731	}
1732
1733	if (srp->res_used)
1734		sg_unlink_reserve(sfp, srp);
1735	else
1736		sg_remove_scat(req_schp);
1737
1738	sg_remove_request(sfp, srp);
1739
1740	return ret;
1741}
1742
1743static int
1744sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1745{
1746	int sg_bufflen = tablesize * sizeof(struct page *);
1747	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1748
1749	schp->pages = kzalloc(sg_bufflen, gfp_flags);
1750	if (!schp->pages)
1751		return -ENOMEM;
1752	schp->sglist_len = sg_bufflen;
1753	return tablesize;	/* number of scat_gath elements allocated */
1754}
1755
1756static int
1757sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1758{
1759	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1760	int sg_tablesize = sfp->parentdp->sg_tablesize;
1761	int blk_size = buff_size, order;
1762	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1763
1764	if (blk_size < 0)
1765		return -EFAULT;
1766	if (0 == blk_size)
1767		++blk_size;	/* don't know why */
1768	/* round request up to next highest SG_SECTOR_SZ byte boundary */
1769	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1770	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1771				   buff_size, blk_size));
1772
1773	/* N.B. ret_sz carried into this block ... */
1774	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1775	if (mx_sc_elems < 0)
1776		return mx_sc_elems;	/* most likely -ENOMEM */
1777
1778	num = scatter_elem_sz;
1779	if (unlikely(num != scatter_elem_sz_prev)) {
1780		if (num < PAGE_SIZE) {
1781			scatter_elem_sz = PAGE_SIZE;
1782			scatter_elem_sz_prev = PAGE_SIZE;
1783		} else
1784			scatter_elem_sz_prev = num;
1785	}
1786
1787	if (sfp->low_dma)
1788		gfp_mask |= GFP_DMA;
1789
1790	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1791		gfp_mask |= __GFP_ZERO;
1792
1793	order = get_order(num);
1794retry:
1795	ret_sz = 1 << (PAGE_SHIFT + order);
1796
1797	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1798	     k++, rem_sz -= ret_sz) {
1799
1800		num = (rem_sz > scatter_elem_sz_prev) ?
1801			scatter_elem_sz_prev : rem_sz;
1802
1803		schp->pages[k] = alloc_pages(gfp_mask, order);
1804		if (!schp->pages[k])
1805			goto out;
1806
1807		if (num == scatter_elem_sz_prev) {
1808			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1809				scatter_elem_sz = ret_sz;
1810				scatter_elem_sz_prev = ret_sz;
1811			}
1812		}
1813
1814		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1815				 "ret_sz=%d\n", k, num, ret_sz));
1816	}		/* end of for loop */
1817
1818	schp->page_order = order;
1819	schp->k_use_sg = k;
1820	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1821			 "rem_sz=%d\n", k, rem_sz));
1822
1823	schp->bufflen = blk_size;
1824	if (rem_sz > 0)	/* must have failed */
1825		return -ENOMEM;
1826	return 0;
1827out:
1828	for (i = 0; i < k; i++)
1829		__free_pages(schp->pages[i], order);
1830
1831	if (--order >= 0)
1832		goto retry;
1833
1834	return -ENOMEM;
1835}
1836
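/*
 * Worked example (illustrative, assuming 4 KiB pages and the usual
 * 32 KiB SG_SCATTER_SZ): get_order(32768) = 3, so each pass of the
 * loop above grabs one order-3 (32 KiB) block and ret_sz = 32768.
 * A 100 KiB request (blk_size = 102400 after alignment) thus needs
 * four blocks: rem_sz walks 102400 -> 69632 -> 36864 -> 4096 -> done,
 * leaving k_use_sg = 4.  If an order-3 allocation fails, the "out:"
 * label frees whatever was gathered, the order is halved (16 KiB
 * blocks) and the whole loop retries, degrading down to single pages
 * before giving up with -ENOMEM.
 */
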
1837static void
1838sg_remove_scat(Sg_scatter_hold * schp)
1839{
1840	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1841	if (schp->pages && schp->sglist_len > 0) {
1842		if (!schp->dio_in_use) {
1843			int k;
1844
1845			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1846				SCSI_LOG_TIMEOUT(5, printk(
1847				    "sg_remove_scat: k=%d, pg=0x%p\n",
1848				    k, schp->pages[k]));
1849				__free_pages(schp->pages[k], schp->page_order);
1850			}
1851
1852			kfree(schp->pages);
1853		}
1854	}
1855	memset(schp, 0, sizeof (*schp));
1856}
1857
1858static int
1859sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
1860{
1861	Sg_scatter_hold *schp = &srp->data;
1862	int k, num;
1863
1864	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
1865				   num_read_xfer));
1866	if ((!outp) || (num_read_xfer <= 0))
1867		return 0;
1868
1869	num = 1 << (PAGE_SHIFT + schp->page_order);
1870	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1871		if (num > num_read_xfer) {
1872			if (__copy_to_user(outp, page_address(schp->pages[k]),
1873					   num_read_xfer))
1874				return -EFAULT;
1875			break;
1876		} else {
1877			if (__copy_to_user(outp, page_address(schp->pages[k]),
1878					   num))
1879				return -EFAULT;
1880			num_read_xfer -= num;
1881			if (num_read_xfer <= 0)
1882				break;
1883			outp += num;
1884		}
1885	}
1886
1887	return 0;
1888}
1889
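/*
 * Copy-out example (illustrative): with order-3 (32 KiB) elements, a
 * reply of num_read_xfer = 40000 bytes copies all 32768 bytes of the
 * first element, then takes the num > num_read_xfer branch to copy
 * the remaining 7232 bytes from the second element and stops.
 */
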
1890static void
1891sg_build_reserve(Sg_fd * sfp, int req_size)
1892{
1893	Sg_scatter_hold *schp = &sfp->reserve;
1894
1895	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
1896	do {
1897		if (req_size < PAGE_SIZE)
1898			req_size = PAGE_SIZE;
1899		if (0 == sg_build_indirect(schp, sfp, req_size))
1900			return;
1901		else
1902			sg_remove_scat(schp);
1903		req_size >>= 1;	/* divide by 2 */
1904	} while (req_size > (PAGE_SIZE / 2));
1905}
1906
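/*
 * Sizing note (descriptive): the loop degrades gracefully under
 * memory pressure.  If a 256 KiB reserve cannot be built, the partial
 * scatter list is torn down and the request halves: 256 KiB ->
 * 128 KiB -> 64 KiB -> ... stopping below PAGE_SIZE/2.  If every
 * attempt fails, sfp->reserve.bufflen stays 0 and the fd simply runs
 * without a reserve buffer.
 */
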
1907static void
1908sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
1909{
1910	Sg_scatter_hold *req_schp = &srp->data;
1911	Sg_scatter_hold *rsv_schp = &sfp->reserve;
1912	int k, num, rem;
1913
1914	srp->res_used = 1;
1915	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
1916	rem = size;
1917
1918	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1919	for (k = 0; k < rsv_schp->k_use_sg; k++) {
1920		if (rem <= num) {
1921			req_schp->k_use_sg = k + 1;
1922			req_schp->sglist_len = rsv_schp->sglist_len;
1923			req_schp->pages = rsv_schp->pages;
1924
1925			req_schp->bufflen = size;
1926			req_schp->page_order = rsv_schp->page_order;
1927			break;
1928		} else
1929			rem -= num;
1930	}
1931
1932	if (k >= rsv_schp->k_use_sg)
1933		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
1934}
1935
1936static void
1937sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
1938{
1939	Sg_scatter_hold *req_schp = &srp->data;
1940
1941	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
1942				   (int) req_schp->k_use_sg));
1943	req_schp->k_use_sg = 0;
1944	req_schp->bufflen = 0;
1945	req_schp->pages = NULL;
1946	req_schp->page_order = 0;
1947	req_schp->sglist_len = 0;
1948	sfp->save_scat_len = 0;
1949	srp->res_used = 0;
1950}
1951
1952static Sg_request *
1953sg_get_rq_mark(Sg_fd * sfp, int pack_id)
1954{
1955	Sg_request *resp;
1956	unsigned long iflags;
1957
1958	write_lock_irqsave(&sfp->rq_list_lock, iflags);
1959	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
1960		/* look for requests that are ready + not SG_IO owned */
1961		if ((1 == resp->done) && (!resp->sg_io_owned) &&
1962		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
1963			resp->done = 2;	/* guard against other readers */
1964			break;
1965		}
1966	}
1967	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1968	return resp;
1969}
1970
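/*
 * Request-state protocol (descriptive): srp->done is 0 while a
 * command is in flight, 1 once completion marks it ready, and 2 from
 * the moment a reader claims it here, so two read()ers on the same fd
 * cannot both collect one response.  A pack_id of -1 matches any
 * ready request; otherwise only a matching write()-side tag is
 * claimed.  User-space sketch (illustrative):
 *
 *	int one = 1;
 *
 *	hdr.pack_id = 7;	// tag the command at write() time
 *	write(sg_fd, &hdr, sizeof(hdr));
 *	...
 *	ioctl(sg_fd, SG_SET_FORCE_PACK_ID, &one);
 *	hdr.pack_id = 7;	// read() now waits for tag 7 only
 *	read(sg_fd, &hdr, sizeof(hdr));
 */
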
1971/* always adds to end of list */
1972static Sg_request *
1973sg_add_request(Sg_fd * sfp)
1974{
1975	int k;
1976	unsigned long iflags;
1977	Sg_request *resp;
1978	Sg_request *rp = sfp->req_arr;
1979
1980	write_lock_irqsave(&sfp->rq_list_lock, iflags);
1981	resp = sfp->headrp;
1982	if (!resp) {
1983		memset(rp, 0, sizeof (Sg_request));
1984		rp->parentfp = sfp;
1985		resp = rp;
1986		sfp->headrp = resp;
1987	} else {
1988		if (0 == sfp->cmd_q)
1989			resp = NULL;	/* command queuing disallowed */
1990		else {
1991			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
1992				if (!rp->parentfp)
1993					break;
1994			}
1995			if (k < SG_MAX_QUEUE) {
1996				memset(rp, 0, sizeof (Sg_request));
1997				rp->parentfp = sfp;
1998				while (resp->nextrp)
1999					resp = resp->nextrp;
2000				resp->nextrp = rp;
2001				resp = rp;
2002			} else
2003				resp = NULL;
2004		}
2005	}
2006	if (resp) {
2007		resp->nextrp = NULL;
2008		resp->header.duration = jiffies_to_msecs(jiffies);
2009	}
2010	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2011	return resp;
2012}
2013
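/*
 * Data-structure note (descriptive): requests are not allocated per
 * command.  Each fd owns a fixed req_arr[SG_MAX_QUEUE] (16 slots); a
 * slot with parentfp == NULL is free, and live slots are threaded
 * into the singly linked list at sfp->headrp.  With command queuing
 * off (cmd_q == 0) only one request may be outstanding, so a second
 * write() before the first read() is refused.
 */
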
2014/* Return of 1 for found; 0 for not found */
2015static int
2016sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2017{
2018	Sg_request *prev_rp;
2019	Sg_request *rp;
2020	unsigned long iflags;
2021	int res = 0;
2022
2023	if ((!sfp) || (!srp) || (!sfp->headrp))
2024		return res;
2025	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2026	prev_rp = sfp->headrp;
2027	if (srp == prev_rp) {
2028		sfp->headrp = prev_rp->nextrp;
2029		prev_rp->parentfp = NULL;
2030		res = 1;
2031	} else {
2032		while ((rp = prev_rp->nextrp)) {
2033			if (srp == rp) {
2034				prev_rp->nextrp = rp->nextrp;
2035				rp->parentfp = NULL;
2036				res = 1;
2037				break;
2038			}
2039			prev_rp = rp;
2040		}
2041	}
2042	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2043	return res;
2044}
2045
2046static Sg_fd *
2047sg_add_sfp(Sg_device * sdp, int dev)
2048{
2049	Sg_fd *sfp;
2050	unsigned long iflags;
2051	int bufflen;
2052
2053	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2054	if (!sfp)
2055		return NULL;
2056
2057	init_waitqueue_head(&sfp->read_wait);
2058	rwlock_init(&sfp->rq_list_lock);
2059
2060	kref_init(&sfp->f_ref);
2061	sfp->timeout = SG_DEFAULT_TIMEOUT;
2062	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2063	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2064	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2065	    sdp->device->host->unchecked_isa_dma : 1;
2066	sfp->cmd_q = SG_DEF_COMMAND_Q;
2067	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2068	sfp->parentdp = sdp;
2069	write_lock_irqsave(&sg_index_lock, iflags);
2070	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2071	write_unlock_irqrestore(&sg_index_lock, iflags);
2072	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2073	if (unlikely(sg_big_buff != def_reserved_size))
2074		sg_big_buff = def_reserved_size;
2075
2076	bufflen = min_t(int, sg_big_buff,
2077			queue_max_sectors(sdp->device->request_queue) * 512);
2078	sg_build_reserve(sfp, bufflen);
2079	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
2080			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2081
2082	kref_get(&sdp->d_ref);
2083	__module_get(THIS_MODULE);
2084	return sfp;
2085}
2086
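/*
 * Worked example (illustrative, assuming the default 32 KiB
 * SG_DEF_RESERVED_SIZE): a queue allowing 128 sectors gives
 * bufflen = min(32768, 128 * 512) = 32768, i.e. the full default
 * reserve, while a queue capped at 32 sectors clamps the reserve to
 * 16 KiB.
 */
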
2087static void sg_remove_sfp_usercontext(struct work_struct *work)
2088{
2089	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2090	struct sg_device *sdp = sfp->parentdp;
2091
2092	/* Cleanup any responses which were never read(). */
2093	while (sfp->headrp)
2094		sg_finish_rem_req(sfp->headrp);
2095
2096	if (sfp->reserve.bufflen > 0) {
2097		SCSI_LOG_TIMEOUT(6,
2098			printk("sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2099				(int) sfp->reserve.bufflen,
2100				(int) sfp->reserve.k_use_sg));
2101		sg_remove_scat(&sfp->reserve);
2102	}
2103
2104	SCSI_LOG_TIMEOUT(6,
2105		printk("sg_remove_sfp: %s, sfp=0x%p\n",
2106			sdp->disk->disk_name,
2107			sfp));
2108	kfree(sfp);
2109
2110	scsi_device_put(sdp->device);
2111	sg_put_dev(sdp);
2112	module_put(THIS_MODULE);
2113}
2114
2115static void sg_remove_sfp(struct kref *kref)
2116{
2117	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2118	struct sg_device *sdp = sfp->parentdp;
2119	unsigned long iflags;
2120
2121	write_lock_irqsave(&sg_index_lock, iflags);
2122	list_del(&sfp->sfd_siblings);
2123	write_unlock_irqrestore(&sg_index_lock, iflags);
2124	wake_up_interruptible(&sdp->o_excl_wait);
2125
2126	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2127	schedule_work(&sfp->ew.work);
2128}
2129
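/*
 * Deferral note (descriptive): the final kref_put() on f_ref can fire
 * from command-completion (IRQ) context, while the teardown above
 * ends in blk_rq_unmap_user() and scsi_device_put(), which may sleep.
 * Bouncing through schedule_work() moves that cleanup into process
 * context.
 */
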
2130static int
2131sg_res_in_use(Sg_fd * sfp)
2132{
2133	const Sg_request *srp;
2134	unsigned long iflags;
2135
2136	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2137	for (srp = sfp->headrp; srp; srp = srp->nextrp)
2138		if (srp->res_used)
2139			break;
2140	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2141	return srp ? 1 : 0;
2142}
2143
2144#ifdef CONFIG_SCSI_PROC_FS
2145static int
2146sg_idr_max_id(int id, void *p, void *data)
2147{
2148	int *k = data;
2149
2150	if (*k < id)
2151		*k = id;
2152
2153	return 0;
2154}
2155
2156static int
2157sg_last_dev(void)
2158{
2159	int k = -1;
2160	unsigned long iflags;
2161
2162	read_lock_irqsave(&sg_index_lock, iflags);
2163	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2164	read_unlock_irqrestore(&sg_index_lock, iflags);
2165	return k + 1;		/* origin 1 */
2166}
2167#endif
2168
2169/* must be called with sg_index_lock held */
2170static Sg_device *sg_lookup_dev(int dev)
2171{
2172	return idr_find(&sg_index_idr, dev);
2173}
2174
2175static Sg_device *sg_get_dev(int dev)
2176{
2177	struct sg_device *sdp;
2178	unsigned long flags;
2179
2180	read_lock_irqsave(&sg_index_lock, flags);
2181	sdp = sg_lookup_dev(dev);
2182	if (!sdp)
2183		sdp = ERR_PTR(-ENXIO);
2184	else if (sdp->detached) {
2185		/* If sdp->detached, then the refcount may already be 0, in
2186		 * which case it would be a bug to do kref_get().
2187		 */
2188		sdp = ERR_PTR(-ENODEV);
2189	} else
2190		kref_get(&sdp->d_ref);
2191	read_unlock_irqrestore(&sg_index_lock, flags);
2192
2193	return sdp;
2194}
2195
2196static void sg_put_dev(struct sg_device *sdp)
2197{
2198	kref_put(&sdp->d_ref, sg_device_destroy);
2199}
2200
2201#ifdef CONFIG_SCSI_PROC_FS
2202
2203static struct proc_dir_entry *sg_proc_sgp;
2204
2205static char sg_proc_sg_dirname[] = "scsi/sg";
2206
2207static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2208
2209static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2210static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2211			          size_t count, loff_t *off);
2212static const struct file_operations adio_fops = {
2213	.owner = THIS_MODULE,
2214	.open = sg_proc_single_open_adio,
2215	.read = seq_read,
2216	.llseek = seq_lseek,
2217	.write = sg_proc_write_adio,
2218	.release = single_release,
2219};
2220
2221static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2222static ssize_t sg_proc_write_dressz(struct file *filp,
2223		const char __user *buffer, size_t count, loff_t *off);
2224static const struct file_operations dressz_fops = {
2225	.owner = THIS_MODULE,
2226	.open = sg_proc_single_open_dressz,
2227	.read = seq_read,
2228	.llseek = seq_lseek,
2229	.write = sg_proc_write_dressz,
2230	.release = single_release,
2231};
2232
2233static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2234static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2235static const struct file_operations version_fops = {
2236	.owner = THIS_MODULE,
2237	.open = sg_proc_single_open_version,
2238	.read = seq_read,
2239	.llseek = seq_lseek,
2240	.release = single_release,
2241};
2242
2243static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2244static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2245static const struct file_operations devhdr_fops = {
2246	.owner = THIS_MODULE,
2247	.open = sg_proc_single_open_devhdr,
2248	.read = seq_read,
2249	.llseek = seq_lseek,
2250	.release = single_release,
2251};
2252
2253static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2254static int sg_proc_open_dev(struct inode *inode, struct file *file);
2255static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2256static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2257static void dev_seq_stop(struct seq_file *s, void *v);
2258static const struct file_operations dev_fops = {
2259	.owner = THIS_MODULE,
2260	.open = sg_proc_open_dev,
2261	.read = seq_read,
2262	.llseek = seq_lseek,
2263	.release = seq_release,
2264};
2265static const struct seq_operations dev_seq_ops = {
2266	.start = dev_seq_start,
2267	.next  = dev_seq_next,
2268	.stop  = dev_seq_stop,
2269	.show  = sg_proc_seq_show_dev,
2270};
2271
2272static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2273static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2274static const struct file_operations devstrs_fops = {
2275	.owner = THIS_MODULE,
2276	.open = sg_proc_open_devstrs,
2277	.read = seq_read,
2278	.llseek = seq_lseek,
2279	.release = seq_release,
2280};
2281static const struct seq_operations devstrs_seq_ops = {
2282	.start = dev_seq_start,
2283	.next  = dev_seq_next,
2284	.stop  = dev_seq_stop,
2285	.show  = sg_proc_seq_show_devstrs,
2286};
2287
2288static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2289static int sg_proc_open_debug(struct inode *inode, struct file *file);
2290static const struct file_operations debug_fops = {
2291	.owner = THIS_MODULE,
2292	.open = sg_proc_open_debug,
2293	.read = seq_read,
2294	.llseek = seq_lseek,
2295	.release = seq_release,
2296};
2297static const struct seq_operations debug_seq_ops = {
2298	.start = dev_seq_start,
2299	.next  = dev_seq_next,
2300	.stop  = dev_seq_stop,
2301	.show  = sg_proc_seq_show_debug,
2302};
2303
2304
2305struct sg_proc_leaf {
2306	const char * name;
2307	const struct file_operations * fops;
2308};
2309
2310static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2311	{"allow_dio", &adio_fops},
2312	{"debug", &debug_fops},
2313	{"def_reserved_size", &dressz_fops},
2314	{"device_hdr", &devhdr_fops},
2315	{"devices", &dev_fops},
2316	{"device_strs", &devstrs_fops},
2317	{"version", &version_fops}
2318};
2319
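/*
 * The table above materializes as /proc/scsi/sg/<name>; sg_proc_init()
 * below marks only the two entries whose fops carry a .write method
 * (allow_dio, def_reserved_size) as writable.  User-space sketch
 * (illustrative) for reading one of them:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/proc/scsi/sg/version", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		return fclose(f) ? 1 : 0;
 *	}
 */
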
2320static int
2321sg_proc_init(void)
2322{
2323	int k, mask;
2324	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2325	struct sg_proc_leaf * leaf;
2326
2327	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2328	if (!sg_proc_sgp)
2329		return 1;
2330	for (k = 0; k < num_leaves; ++k) {
2331		leaf = &sg_proc_leaf_arr[k];
2332		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2333		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2334	}
2335	return 0;
2336}
2337
2338static void
2339sg_proc_cleanup(void)
2340{
2341	int k;
2342	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2343
2344	if (!sg_proc_sgp)
2345		return;
2346	for (k = 0; k < num_leaves; ++k)
2347		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2348	remove_proc_entry(sg_proc_sg_dirname, NULL);
2349}
2350
2351
2352static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2353{
2354	seq_printf(s, "%d\n", *((int *)s->private));
2355	return 0;
2356}
2357
2358static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2359{
2360	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2361}
2362
2363static ssize_t
2364sg_proc_write_adio(struct file *filp, const char __user *buffer,
2365		   size_t count, loff_t *off)
2366{
2367	int num;
2368	char buff[11];
2369
2370	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2371		return -EACCES;
2372	num = (count < 10) ? count : 10;
2373	if (copy_from_user(buff, buffer, num))
2374		return -EFAULT;
2375	buff[num] = '\0';
2376	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2377	return count;
2378}
2379
2380static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2381{
2382	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2383}
2384
2385static ssize_t
2386sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2387		     size_t count, loff_t *off)
2388{
2389	int num;
2390	unsigned long k = ULONG_MAX;
2391	char buff[11];
2392
2393	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2394		return -EACCES;
2395	num = (count < 10) ? count : 10;
2396	if (copy_from_user(buff, buffer, num))
2397		return -EFAULT;
2398	buff[num] = '\0';
2399	k = simple_strtoul(buff, NULL, 10);
2400	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
2401		sg_big_buff = k;
2402		return count;
2403	}
2404	return -ERANGE;
2405}
2406
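/*
 * Usage note (descriptive): the handler accepts a decimal byte count
 * (e.g. writing "131072" sets a 128 KiB value); anything above
 * 1048576 is rejected with -ERANGE, and the writer needs both
 * CAP_SYS_ADMIN and CAP_SYS_RAWIO.  Note that sg_add_sfp() snaps
 * sg_big_buff back to def_reserved_size on the next open, so the
 * module parameter remains authoritative.
 */
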
2407static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2408{
2409	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2410		   sg_version_date);
2411	return 0;
2412}
2413
2414static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2415{
2416	return single_open(file, sg_proc_seq_show_version, NULL);
2417}
2418
2419static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2420{
2421	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2422		   "online\n");
2423	return 0;
2424}
2425
2426static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2427{
2428	return single_open(file, sg_proc_seq_show_devhdr, NULL);
2429}
2430
2431struct sg_proc_deviter {
2432	loff_t	index;
2433	size_t	max;
2434};
2435
2436static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2437{
2438	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2439
2440	s->private = it;
2441	if (!it)
2442		return NULL;
2443
2444	it->index = *pos;
2445	it->max = sg_last_dev();
2446	if (it->index >= it->max)
2447		return NULL;
2448	return it;
2449}
2450
2451static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2452{
2453	struct sg_proc_deviter * it = s->private;
2454
2455	*pos = ++it->index;
2456	return (it->index < it->max) ? it : NULL;
2457}
2458
2459static void dev_seq_stop(struct seq_file *s, void *v)
2460{
2461	kfree(s->private);
2462}
2463
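/*
 * seq_file iterator contract (descriptive): ->start() allocates the
 * cursor and returns NULL once *pos reaches sg_last_dev(), ->next()
 * advances it, and ->stop() frees it.  The same start/next/stop trio
 * backs the "devices", "device_strs" and "debug" files, which differ
 * only in their ->show() callback.
 */
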
2464static int sg_proc_open_dev(struct inode *inode, struct file *file)
2465{
2466	return seq_open(file, &dev_seq_ops);
2467}
2468
2469static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2470{
2471	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2472	Sg_device *sdp;
2473	struct scsi_device *scsidp;
2474	unsigned long iflags;
2475
2476	read_lock_irqsave(&sg_index_lock, iflags);
2477	sdp = it ? sg_lookup_dev(it->index) : NULL;
2478	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2479		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2480			      scsidp->host->host_no, scsidp->channel,
2481			      scsidp->id, scsidp->lun, (int) scsidp->type,
2482			      1,
2483			      (int) scsidp->queue_depth,
2484			      (int) scsidp->device_busy,
2485			      (int) scsi_device_online(scsidp));
2486	else
2487		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2488	read_unlock_irqrestore(&sg_index_lock, iflags);
2489	return 0;
2490}
2491
2492static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2493{
2494	return seq_open(file, &devstrs_seq_ops);
2495}
2496
2497static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2498{
2499	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2500	Sg_device *sdp;
2501	struct scsi_device *scsidp;
2502	unsigned long iflags;
2503
2504	read_lock_irqsave(&sg_index_lock, iflags);
2505	sdp = it ? sg_lookup_dev(it->index) : NULL;
2506	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2507		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2508			   scsidp->vendor, scsidp->model, scsidp->rev);
2509	else
2510		seq_printf(s, "<no active device>\n");
2511	read_unlock_irqrestore(&sg_index_lock, iflags);
2512	return 0;
2513}
2514
2515/* must be called while holding sg_index_lock */
2516static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2517{
2518	int k, m, new_interface, blen, usg;
2519	Sg_request *srp;
2520	Sg_fd *fp;
2521	const sg_io_hdr_t *hp;
2522	const char * cp;
2523	unsigned int ms;
2524
2525	k = 0;
2526	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2527		k++;
2528		read_lock(&fp->rq_list_lock); /* irqs already disabled */
2529		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
2530			   "(res)sgat=%d low_dma=%d\n", k,
2531			   jiffies_to_msecs(fp->timeout),
2532			   fp->reserve.bufflen,
2533			   (int) fp->reserve.k_use_sg,
2534			   (int) fp->low_dma);
2535		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2536			   (int) fp->cmd_q, (int) fp->force_packid,
2537			   (int) fp->keep_orphan, (int) fp->closed);
2538		for (m = 0, srp = fp->headrp;
2539				srp != NULL;
2540				++m, srp = srp->nextrp) {
2541			hp = &srp->header;
2542			new_interface = (hp->interface_id == '\0') ? 0 : 1;
2543			if (srp->res_used) {
2544				if (new_interface &&
2545				    (SG_FLAG_MMAP_IO & hp->flags))
2546					cp = "     mmap>> ";
2547				else
2548					cp = "     rb>> ";
2549			} else {
2550				if (SG_INFO_DIRECT_IO_MASK & hp->info)
2551					cp = "     dio>> ";
2552				else
2553					cp = "     ";
2554			}
2555			seq_puts(s, cp);
2556			blen = srp->data.bufflen;
2557			usg = srp->data.k_use_sg;
2558			seq_puts(s, srp->done ?
2559				 ((1 == srp->done) ?  "rcv:" : "fin:")
2560				 : "act:");
2561			seq_printf(s, " id=%d blen=%d",
2562				   srp->header.pack_id, blen);
2563			if (srp->done)
2564				seq_printf(s, " dur=%d", hp->duration);
2565			else {
2566				ms = jiffies_to_msecs(jiffies);
2567				seq_printf(s, " t_o/elap=%d/%d",
2568					(new_interface ? hp->timeout :
2569						  jiffies_to_msecs(fp->timeout)),
2570					(ms > hp->duration ? ms - hp->duration : 0));
2571			}
2572			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2573				   (int) srp->data.cmd_opcode);
2574		}
2575		if (0 == m)
2576			seq_printf(s, "     No requests active\n");
2577		read_unlock(&fp->rq_list_lock);
2578	}
2579}
2580
2581static int sg_proc_open_debug(struct inode *inode, struct file *file)
2582{
2583	return seq_open(file, &debug_seq_ops);
2584}
2585
2586static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2587{
2588	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2589	Sg_device *sdp;
2590	unsigned long iflags;
2591
2592	if (it && (0 == it->index)) {
2593		seq_printf(s, "max_active_device=%d(origin 1)\n",
2594			   (int)it->max);
2595		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2596	}
2597
2598	read_lock_irqsave(&sg_index_lock, iflags);
2599	sdp = it ? sg_lookup_dev(it->index) : NULL;
2600	if (sdp && !list_empty(&sdp->sfds)) {
2601		struct scsi_device *scsidp = sdp->device;
2602
2603		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2604		if (sdp->detached)
2605			seq_printf(s, "detached pending close ");
2606		else
2607			seq_printf(s,
2608				   "scsi%d chan=%d id=%d lun=%d   em=%d",
2609				   scsidp->host->host_no,
2610				   scsidp->channel, scsidp->id,
2611				   scsidp->lun,
2612				   scsidp->host->hostt->emulated);
2613		seq_printf(s, " sg_tablesize=%d excl=%d\n",
2614			   sdp->sg_tablesize, sdp->exclude);
2615		sg_proc_debug_helper(s, sdp);
2616	}
2617	read_unlock_irqrestore(&sg_index_lock, iflags);
2618	return 0;
2619}
2620
2621#endif				/* CONFIG_SCSI_PROC_FS */
2622
2623module_init(init_sg);
2624module_exit(exit_sg);
2625