/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out anything that is not
 * ordinary filesystem I/O or a discard.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
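
/*
 * Illustrative sketch (not part of this driver): the block layer runs
 * the prep_rq callback before a request is handed to the driver, so a
 * pass-through command queued as below would be rejected with
 * BLKPREP_KILL, while ordinary filesystem I/O passes through:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;	// not REQ_TYPE_FS, no REQ_DISCARD
 *	blk_execute_rq(q, gd, rq, 0);		// mmc_prep_request() kills it
 */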

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
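
/*
 * Sketch of the issue_fn contract assumed above (illustrative only; the
 * real handler lives in the MMC block driver). The handler performs the
 * transfer and completes the request itself, under the queue lock;
 * do_transfer() and my_lock are hypothetical stand-ins:
 *
 *	static int my_issue_fn(struct mmc_queue *mq, struct request *req)
 *	{
 *		int err = do_transfer(mq->card, req);	// hypothetical helper
 *
 *		spin_lock_irq(&my_lock);
 *		__blk_end_request_all(req, err);	// 0 or negative errno
 *		spin_unlock_irq(&my_lock);
 *		return err ? 0 : 1;
 *	}
 */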

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}
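
	/*
	 * Worked example (illustrative): card->erase_size is expressed in
	 * 512-byte sectors, so the "<< 9" above converts it to bytes for
	 * the queue limits. A card reporting an erase_size of 1024 sectors
	 * therefore advertises a discard granularity and alignment of
	 * 1024 << 9 = 512 KiB.
	 */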

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif
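
	/*
	 * Worked example (illustrative): with the default
	 * MMC_QUEUE_BOUNCESZ of 65536, a host reporting
	 * max_req_size = 32768 clamps bouncesz to 32768, giving a 32 KiB
	 * bounce buffer, a 64-sector (32768 / 512) request size limit and
	 * a bounce scatterlist of up to 64 entries.
	 */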

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
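
/*
 * Usage sketch (illustrative, not part of this file): the MMC block
 * driver's probe path pairs mmc_init_queue() with mmc_cleanup_queue()
 * and supplies the issue_fn that mmc_queue_thread() calls. Names other
 * than the queue API below are hypothetical:
 *
 *	static int my_blk_probe(struct mmc_card *card)
 *	{
 *		struct mmc_queue *mq = &my_dev.queue;
 *		int ret;
 *
 *		spin_lock_init(&my_dev.lock);
 *		ret = mmc_init_queue(mq, card, &my_dev.lock);
 *		if (ret)
 *			return ret;
 *		mq->issue_fn = my_issue_fn;	// consumed by mmc_queue_thread()
 *		return 0;
 *	}
 */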

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
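
/*
 * Usage sketch (illustrative): a block driver's power-management hooks
 * would typically bracket a host suspend with these helpers; my_dev is
 * a hypothetical driver context:
 *
 *	static int my_blk_suspend(struct mmc_card *card, pm_message_t state)
 *	{
 *		mmc_queue_suspend(&my_dev.queue);	// drains in-flight I/O
 *		return 0;
 *	}
 *
 *	static int my_blk_resume(struct mmc_card *card)
 *	{
 *		mmc_queue_resume(&my_dev.queue);
 *		return 0;
 *	}
 *
 * The thread_sem handshake in mmc_queue_thread() is what makes
 * mmc_queue_suspend() block until the worker thread is idle.
 */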

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}
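
/*
 * Illustrative example: a request whose pages map to, say, four
 * bounce_sg entries of 4 KiB each yields a buflen of 16 KiB, and
 * sg_init_one() above replaces them with a single 16 KiB entry covering
 * bounce_buf, which is why the bounce path always reports an sg_len
 * of 1 to the caller.
 */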

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}
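
/*
 * Putting it together (illustrative sketch, not part of this file): an
 * issue_fn built on the helpers above maps the request, bounces write
 * data in, runs the transfer, then bounces read data back out.
 * mmc_wait_for_req() is the core's synchronous submission helper;
 * prepare_mrq() is a hypothetical stand-in for the block driver's
 * mmc_request setup:
 *
 *	static int my_issue_fn(struct mmc_queue *mq, struct request *req)
 *	{
 *		struct mmc_request mrq;
 *
 *		prepare_mrq(&mrq, mq->card, req);	// hypothetical
 *		mrq.data->sg = mq->sg;
 *		mrq.data->sg_len = mmc_queue_map_sg(mq);
 *
 *		mmc_queue_bounce_pre(mq);		// copy WRITE data in
 *		mmc_wait_for_req(mq->card->host, &mrq);
 *		mmc_queue_bounce_post(mq);		// copy READ data out
 *
 *		return mrq.data->error ? 0 : 1;
 *	}
 */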