#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <asm/io.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_s;
typedef struct elevator_s elevator_t;

/*
 * Ok, this is an expanded form so that we can use the same
 * request for paging requests.
 */
struct request {
	struct list_head queue;
	int elevator_sequence;

	volatile int rq_status;	/* should split this into a few status bits */
#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

	kdev_t rq_dev;
	int cmd;		/* READ or WRITE */
	int errors;
	unsigned long start_time;
	unsigned long sector;
	unsigned long nr_sectors;
	unsigned long hard_sector, hard_nr_sectors;
	unsigned int nr_segments;
	unsigned int nr_hw_segments;
	unsigned long current_nr_sectors, hard_cur_sectors;
	void * special;
	char * buffer;
	struct completion * waiting;
	struct buffer_head * bh;
	struct buffer_head * bhtail;
	request_queue_t *q;
};
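
/*
 * Illustrative sketch (not part of the original header): how a driver
 * typically consumes a struct request in its request function.  The
 * transfer helper "mydev_transfer" is hypothetical; end_that_request_first,
 * end_that_request_last and blkdev_dequeue_request are the usual 2.4
 * completion helpers, assumed to be declared elsewhere.
 *
 *	struct request *req = blkdev_entry_next_request(&q->queue_head);
 *
 *	mydev_transfer(req->rq_dev, req->sector, req->current_nr_sectors,
 *		       req->buffer, req->cmd == WRITE);
 *	if (!end_that_request_first(req, 1, "mydev")) {
 *		blkdev_dequeue_request(req);
 *		end_that_request_last(req);
 *	}
 */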

#include <linux/elevator.h>

typedef int (merge_request_fn) (request_queue_t *q,
				struct request  *req,
				struct buffer_head *bh,
				int);
typedef int (merge_requests_fn) (request_queue_t *q,
				 struct request  *req,
				 struct request  *req2,
				 int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);
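
/*
 * A minimal request_fn_proc skeleton, for illustration only (the names
 * "mydev_request" and "mydev_handle" are hypothetical):
 *
 *	static void mydev_request(request_queue_t *q)
 *	{
 *		while (!list_empty(&q->queue_head)) {
 *			struct request *req =
 *				blkdev_entry_next_request(&q->queue_head);
 *			mydev_handle(req);
 *		}
 *	}
 */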

/*
 * Default number of free requests per queue; ll_rw_blk will scale it
 * down according to available RAM at init time
 */
#define QUEUE_NR_REQUESTS	8192

struct request_list {
	unsigned int count;
	struct list_head free;
};

struct request_queue
{
	/*
	 * the queue request freelists, one for reads and one for writes
	 */
	struct request_list	rq[2];

	/*
	 * The total number of requests on each queue
	 */
	int nr_requests;

	/*
	 * Batching threshold for sleep/wakeup decisions
	 */
	int batch_requests;

	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	elevator_t		elevator;

	request_fn_proc		* request_fn;
	merge_request_fn	* back_merge_fn;
	merge_request_fn	* front_merge_fn;
	merge_requests_fn	* merge_requests_fn;
	make_request_fn		* make_request_fn;
	plug_device_fn		* plug_device_fn;
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			* queuedata;

	/*
	 * This is used to remove the plug when tq_disk runs.
	 */
	struct tq_struct	plug_tq;

	/*
	 * Boolean that indicates whether this queue is plugged or not.
	 */
	char			plugged;

	/*
	 * Boolean that indicates whether current_request is active or
	 * not.
	 */
	char			head_active;

	unsigned long		bounce_pfn;

	/*
	 * Meant to protect the queue in the future, replacing the
	 * global io_request_lock
	 */
	spinlock_t		queue_lock;

	/*
	 * Tasks wait here for free read and write requests
	 */
	wait_queue_head_t	wait_for_requests[2];
};
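
/*
 * Example of the queuedata hook (a sketch; "struct mydev_info" is a
 * hypothetical per-device structure): a driver stores its private state
 * at init time and retrieves it in its request function:
 *
 *	q->queuedata = &mydev_info;
 *	...
 *	struct mydev_info *info = (struct mydev_info *) q->queuedata;
 */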

extern unsigned long blk_max_low_pfn, blk_max_pfn;

#define BLK_BOUNCE_HIGH		(blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		(blk_max_pfn << PAGE_SHIFT)

extern void blk_queue_bounce_limit(request_queue_t *, u64);
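
/*
 * Typical use (illustrative): a controller that can only DMA below the
 * highmem boundary restricts its queue with
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *
 * while fully 64-bit capable hardware would pass BLK_BOUNCE_ANY.
 */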

#ifdef CONFIG_HIGHMEM
extern struct buffer_head *create_bounce(int, struct buffer_head *);

/*
 * Return bh unchanged if its page is directly addressable by this
 * queue's device, otherwise substitute a bounce buffer.
 */
extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
						   struct buffer_head *bh)
{
	struct page *page = bh->b_page;

#ifndef CONFIG_DISCONTIGMEM
	if (page - mem_map <= q->bounce_pfn)
#else
	if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
#endif
		return bh;

	return create_bounce(rw, bh);
}
#else
#define blk_queue_bounce(q, rw, bh)	(bh)
#endif
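
/*
 * Usage sketch (hypothetical "mydev_make_request"): a driver that
 * supplies its own make_request_fn and DMAs directly can bounce each
 * buffer head before starting I/O on it:
 *
 *	static int mydev_make_request(request_queue_t *q, int rw,
 *				      struct buffer_head *bh)
 *	{
 *		bh = blk_queue_bounce(q, rw, bh);
 *		... start I/O on bh ...
 *		return 0;
 *	}
 */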

/* Physical address of a buffer head's data */
#define bh_phys(bh)		(page_to_phys((bh)->b_page) + bh_offset((bh)))

/* True if b2's data starts right where b1's data ends (physically contiguous) */
#define BH_CONTIG(b1, b2)	(bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
/* True if both buffers lie within the same 4GB physical segment */
#define BH_PHYS_4G(b1, b2)	((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))
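
/*
 * Example (illustrative): two buffer heads may share one scatter-gather
 * segment when they are physically contiguous and stay within the same
 * 4GB boundary, roughly the test blk_seg_merge_ok() performs:
 *
 *	if (BH_CONTIG(bh, bh->b_reqnext) && BH_PHYS_4G(bh, bh->b_reqnext))
 *		... count both buffers as a single segment ...
 */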

struct blk_dev_struct {
	/*
	 * queue_proc has to be atomic
	 */
	request_queue_t		request_queue;
	queue_proc		*queue;
	void			*data;
};

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues.  We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
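
/*
 * Example (MYDEV_MAJOR and mydev_request are hypothetical): a
 * single-queue driver simply hands its default queue to blk_init_queue():
 *
 *	blk_init_queue(BLK_DEFAULT_QUEUE(MYDEV_MAJOR), mydev_request);
 */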

extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern inline request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern int blk_grow_request_list(request_queue_t *q, int nr_requests);
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern inline int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);
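
/*
 * Putting the property calls together, a minimal single-queue driver
 * lifecycle might look like this (a sketch; the queue setup follows the
 * example above, and headactive 0 means the driver dequeues requests
 * itself rather than leaving the head request live on the queue):
 *
 *	request_queue_t *q = BLK_DEFAULT_QUEUE(MYDEV_MAJOR);
 *
 *	blk_init_queue(q, mydev_request);
 *	blk_queue_headactive(q, 0);
 *
 *	... and on unload:
 *
 *	blk_cleanup_queue(q);
 */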

extern int * blk_size[MAX_BLKDEV];	/* device size, in units of 1024 bytes */

extern int * blksize_size[MAX_BLKDEV];	/* block size, in bytes */

extern int * hardsect_size[MAX_BLKDEV];	/* hardware sector size, in bytes */

extern int * max_readahead[MAX_BLKDEV];	/* per-device readahead limit */

extern int * max_sectors[MAX_BLKDEV];	/* max sectors per request */

extern int * max_segments[MAX_BLKDEV];	/* max segments per request */

#define MAX_SEGMENTS 128
#define MAX_SECTORS 255

#define PageAlignSize(size) (((size) + PAGE_SIZE - 1) & PAGE_MASK)

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
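
/*
 * Example: walking every request currently queued (the caller is
 * assumed to hold io_request_lock):
 *
 *	struct list_head *entry;
 *
 *	list_for_each(entry, &q->queue_head) {
 *		struct request *req = blkdev_entry_to_request(entry);
 *		...
 *	}
 */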

extern void drive_stat_acct(kdev_t dev, int rw,
			    unsigned long nr_sectors, int new_io);

static inline int get_hardsect_size(kdev_t dev)
{
	int retval = 512;
	int major = MAJOR(dev);

	if (hardsect_size[major]) {
		int minor = MINOR(dev);
		if (hardsect_size[major][minor])
			retval = hardsect_size[major][minor];
	}
	return retval;
}

#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

/*
 * Return log2 of the block size, i.e. the number of bits needed to
 * address bytes within one block.
 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
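
/*
 * For instance: blksize_bits(512) == 9, blksize_bits(1024) == 10,
 * blksize_bits(4096) == 12.
 */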

/*
 * Return the current block size for the device, defaulting to
 * BLOCK_SIZE (1024 bytes) if none has been set.
 */
static inline unsigned int block_size(kdev_t dev)
{
	int retval = BLOCK_SIZE;
	int major = MAJOR(dev);

	if (blksize_size[major]) {
		int minor = MINOR(dev);
		if (blksize_size[major][minor])
			retval = blksize_size[major][minor];
	}
	return retval;
}
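
/*
 * Illustrative combination of the two lookups: how many hardware
 * sectors make up one software block on a given device.
 *
 *	int sectors_per_block = block_size(dev) / get_hardsect_size(dev);
 */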

#endif