/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

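/*
 * Emergency page reserves: page_pool backs highmem bouncing, while
 * isa_page_pool backs bouncing into the ISA DMA zone (GFP_DMA).
 */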
static mempool_t *page_pool, *isa_page_pool;

#ifdef CONFIG_HIGHMEM
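/*
 * Boot-time setup of the highmem bounce pool. On machines without
 * highmem there is nothing that would ever need bouncing, so the
 * pool is not created at all.
 */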
static __init int init_emergency_pool(void)
{
	struct sysinfo i;
	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk(KERN_INFO "highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);

/*
 * highmem version: map the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called every time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been
 * created before doing any work.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk(KERN_INFO "isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * gfp mask set on the queue, the destination page may or may not be
 * a highmem page. kmap it unconditionally; kmap will do the right
 * thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy
		 * from tovec; bounce_copy_vec already uses tovec->bv_len.
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}

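/*
 * Common completion path for bounced bios: return the bounce pages
 * to their mempool, propagate the EOPNOTSUPP flag if set, and
 * complete the original bio.
 */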
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up the bounce pages that were used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}
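/*
 * Write completions need no copy-back: the data already went out
 * through the bounce pages, so only the pages are released.
 */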
static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}
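/*
 * Read completions must copy the data that landed in the bounce
 * pages back into the original (typically highmem) pages, but only
 * if the read actually succeeded.
 */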
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}
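/*
 * Slow path: walk the original bio and replace every segment that
 * lies above the queue's bounce pfn with a page from the mempool.
 * The resulting clone is submitted in place of the original, which
 * is completed from the clone's end_io handler.
 */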
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio) {
			unsigned int cnt = (*bio_orig)->bi_vcnt;

			bio = bio_alloc(GFP_NOIO, cnt);
			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
		}

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

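		/*
		 * For writes, copy the payload into the bounce page now,
		 * before submission. Reads are copied back the other way
		 * at completion time, in __bounce_end_io_read().
		 */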
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	trace_block_bio_bounce(q, *bio_orig);

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}
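	/*
	 * Mirror the remaining fields of the original bio so the clone
	 * targets the same device and sector range, and flag it as
	 * bounced.
	 */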
	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
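/**
 * blk_queue_bounce - bounce pages of a bio that the queue cannot reach
 * @q: the request queue the bio will be submitted to
 * @bio_orig: pointer to the bio; may be updated to point to a bounced
 *	clone, with the original reachable via ->bi_private
 *
 * Callers must be prepared for *@bio_orig to be replaced.
 */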
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn
	 * is equal to or bigger than the highest pfn in the system -- in
	 * that case, don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}
EXPORT_SYMBOL(blk_queue_bounce);
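
/*
 * Usage sketch (illustrative only): a driver implementing its own
 * make_request function would bounce the bio before looking at its
 * data. After the call, bio may point to a bounced clone whose pages
 * are all reachable by the device. my_handle_bio() is a hypothetical
 * helper, not a real API.
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_bounce(q, &bio);
 *		return my_handle_bio(q, bio);
 *	}
 */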