/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mmc/card/

Lines Matching refs:mq

47 struct mmc_queue *mq = d;
48 struct request_queue *q = mq->queue;
52 down(&mq->thread_sem);
60 mq->req = req;
68 up(&mq->thread_sem);
70 down(&mq->thread_sem);
75 mq->issue_fn(mq, req);
77 up(&mq->thread_sem);
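
The matches above (source lines 47-77) fall inside mmc_queue_thread(), the per-card worker that feeds requests to the driver. As a reading aid, here is a sketch of how that loop reads in mainline 2.6.36 (this directory corresponds to drivers/mmc/card/queue.c upstream); anything not shown in the matched lines is taken from mainline and may differ in this vendor tree:

static int mmc_queue_thread(void *d)
{
    struct mmc_queue *mq = d;                       /* matched line 47 */
    struct request_queue *q = mq->queue;            /* matched line 48 */

    current->flags |= PF_MEMALLOC;

    down(&mq->thread_sem);                          /* matched line 52 */
    do {
        struct request *req = NULL;

        spin_lock_irq(q->queue_lock);
        set_current_state(TASK_INTERRUPTIBLE);
        if (!blk_queue_plugged(q))
            req = blk_fetch_request(q);
        mq->req = req;                              /* matched line 60 */
        spin_unlock_irq(q->queue_lock);

        if (!req) {
            if (kthread_should_stop()) {
                set_current_state(TASK_RUNNING);
                break;
            }
            /* drop the semaphore while idle so suspend can grab it */
            up(&mq->thread_sem);                    /* matched line 68 */
            schedule();
            down(&mq->thread_sem);                  /* matched line 70 */
            continue;
        }
        set_current_state(TASK_RUNNING);

        mq->issue_fn(mq, req);                      /* matched line 75 */
    } while (1);
    up(&mq->thread_sem);                            /* matched line 77 */

    return 0;
}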
90 struct mmc_queue *mq = q->queuedata;
93 if (!mq) {
101 if (!mq->req)
102 wake_up_process(mq->thread);
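
Lines 90-102 belong to mmc_request(), the request_fn handed to blk_init_queue() further down; it only wakes the worker thread, or fails requests outright when the queue is being torn down. A sketch based on mainline 2.6.36, with the same caveat that unmatched details are assumptions:

static void mmc_request(struct request_queue *q)
{
    struct mmc_queue *mq = q->queuedata;            /* matched line 90 */
    struct request *req;

    if (!mq) {                                      /* matched line 93 */
        /* queue already detached in mmc_cleanup_queue(): fail quietly */
        while ((req = blk_fetch_request(q)) != NULL) {
            req->cmd_flags |= REQ_QUIET;
            __blk_end_request_all(req, -EIO);
        }
        return;
    }

    if (!mq->req)                                   /* matched line 101 */
        wake_up_process(mq->thread);                /* matched line 102 */
}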
107 * @mq: mmc queue
113 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
122 mq->card = card;
123 mq->queue = blk_init_queue(mmc_request, lock);
124 if (!mq->queue)
127 mq->queue->queuedata = mq;
128 mq->req = NULL;
130 blk_queue_prep_rq(mq->queue, mmc_prep_request);
131 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
135 mq->queue->limits.max_discard_sectors = UINT_MAX;
137 mq->queue->limits.discard_zeroes_data = 1;
139 mq->queue->limits.discard_granularity =
141 mq->queue->limits.discard_alignment =
146 mq->queue);
163 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
164 if (!mq->bounce_buf) {
171 if (mq->bounce_buf) {
172 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
173 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
174 blk_queue_max_segments(mq->queue, bouncesz / 512);
175 blk_queue_max_segment_size(mq->queue, bouncesz);
177 mq->sg = kmalloc(sizeof(struct scatterlist),
179 if (!mq->sg) {
183 sg_init_table(mq->sg, 1);
185 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
187 if (!mq->bounce_sg) {
191 sg_init_table(mq->bounce_sg, bouncesz / 512);
196 if (!mq->bounce_buf) {
197 blk_queue_bounce_limit(mq->queue, limit);
198 blk_queue_max_hw_sectors(mq->queue,
200 blk_queue_max_segments(mq->queue, host->max_hw_segs);
201 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
203 mq->sg = kmalloc(sizeof(struct scatterlist) *
205 if (!mq->sg) {
209 sg_init_table(mq->sg, host->max_phys_segs);
212 init_MUTEX(&mq->thread_sem);
214 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
215 if (IS_ERR(mq->thread)) {
216 ret = PTR_ERR(mq->thread);
222 if (mq->bounce_sg)
223 kfree(mq->bounce_sg);
224 mq->bounce_sg = NULL;
226 if (mq->sg)
227 kfree(mq->sg);
228 mq->sg = NULL;
229 if (mq->bounce_buf)
230 kfree(mq->bounce_buf);
231 mq->bounce_buf = NULL;
232 blk_cleanup_queue(mq->queue);
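
Lines 113-232 cover mmc_init_queue() and its error path: allocate the block queue, advertise non-rotational and (when the card supports erase) discard capability, optionally set up a bounce buffer for hosts limited to a single segment, allocate the scatterlists, and start the mmcqd thread. A condensed sketch based on mainline 2.6.36, with the bounce-size clamping and discard granularity details elided into comments; the vendor tree may differ:

int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);  /* matched line 123 */
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card)) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
        mq->queue->limits.max_discard_sectors = UINT_MAX;
        if (card->erased_byte == 0)
            mq->queue->limits.discard_zeroes_data = 1;
        /* discard_granularity / discard_alignment are derived from
         * card->erase_size here (matched lines 139-141, elided) */
    }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_hw_segs == 1) {
        unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

        /* bouncesz is clamped to max_req_size, max_seg_size and
         * max_blk_count * 512 before allocating */
        if (bouncesz > 512)
            mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);

        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
            if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->sg, 1);

            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                    bouncesz / 512, GFP_KERNEL);
            if (!mq->bounce_sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->bounce_sg, bouncesz / 512);
        }
    }
#endif

    if (!mq->bounce_buf) {
        /* no bouncing: expose the host's real limits to the block layer */
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
            min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_hw_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->sg = kmalloc(sizeof(struct scatterlist) *
                host->max_phys_segs, GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, host->max_phys_segs);
    }

    init_MUTEX(&mq->thread_sem);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
 free_bounce_sg:
    kfree(mq->bounce_sg);       /* kfree(NULL) is safe */
    mq->bounce_sg = NULL;
 cleanup_queue:
    kfree(mq->sg);
    mq->sg = NULL;
    kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;
    blk_cleanup_queue(mq->queue);
    return ret;
}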
236 void mmc_cleanup_queue(struct mmc_queue *mq)
238 struct request_queue *q = mq->queue;
242 mmc_queue_resume(mq);
245 kthread_stop(mq->thread);
253 if (mq->bounce_sg)
254 kfree(mq->bounce_sg);
255 mq->bounce_sg = NULL;
257 kfree(mq->sg);
258 mq->sg = NULL;
260 if (mq->bounce_buf)
261 kfree(mq->bounce_buf);
262 mq->bounce_buf = NULL;
264 mq->card = NULL;
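
Lines 236-264 are mmc_cleanup_queue(): resume the queue first (kthread_stop() would never return if the worker were parked on the suspend semaphore), stop the worker, detach queuedata so mmc_request() fails any further requests, then free the scatterlists and bounce buffer. Sketch based on mainline 2.6.36:

void mmc_cleanup_queue(struct mmc_queue *mq)
{
    struct request_queue *q = mq->queue;            /* matched line 238 */
    unsigned long flags;

    /* make sure the queue isn't suspended, as that would deadlock */
    mmc_queue_resume(mq);                           /* matched line 242 */

    /* then terminate the worker thread */
    kthread_stop(mq->thread);                       /* matched line 245 */

    /* empty the queue: new requests now hit the dead-queue path */
    spin_lock_irqsave(q->queue_lock, flags);
    q->queuedata = NULL;
    blk_start_queue(q);
    spin_unlock_irqrestore(q->queue_lock, flags);

    if (mq->bounce_sg)
        kfree(mq->bounce_sg);
    mq->bounce_sg = NULL;

    kfree(mq->sg);
    mq->sg = NULL;

    if (mq->bounce_buf)
        kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;

    mq->card = NULL;                                /* matched line 264 */
}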
270 * @mq: MMC queue to suspend
276 void mmc_queue_suspend(struct mmc_queue *mq)
278 struct request_queue *q = mq->queue;
281 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
282 mq->flags |= MMC_QUEUE_SUSPENDED;
288 down(&mq->thread_sem);
294 * @mq: MMC queue to resume
296 void mmc_queue_resume(struct mmc_queue *mq)
298 struct request_queue *q = mq->queue;
301 if (mq->flags & MMC_QUEUE_SUSPENDED) {
302 mq->flags &= ~MMC_QUEUE_SUSPENDED;
304 up(&mq->thread_sem);
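
Lines 276-304 are the suspend/resume pair. Suspend stops the block queue and then takes thread_sem, which the worker only releases while idle, so once mmc_queue_suspend() returns no request is in flight; resume reverses both steps. Sketch based on mainline 2.6.36:

void mmc_queue_suspend(struct mmc_queue *mq)
{
    struct request_queue *q = mq->queue;            /* matched line 278 */
    unsigned long flags;

    if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {       /* matched line 281 */
        mq->flags |= MMC_QUEUE_SUSPENDED;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* park the worker by taking the semaphore it drops while idle */
        down(&mq->thread_sem);                      /* matched line 288 */
    }
}

void mmc_queue_resume(struct mmc_queue *mq)
{
    struct request_queue *q = mq->queue;            /* matched line 298 */
    unsigned long flags;

    if (mq->flags & MMC_QUEUE_SUSPENDED) {          /* matched line 301 */
        mq->flags &= ~MMC_QUEUE_SUSPENDED;

        up(&mq->thread_sem);                        /* matched line 304 */

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
    }
}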
315 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
322 if (!mq->bounce_buf)
323 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
325 BUG_ON(!mq->bounce_sg);
327 sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
329 mq->bounce_sg_len = sg_len;
332 for_each_sg(mq->bounce_sg, sg, sg_len, i)
335 sg_init_one(mq->sg, mq->bounce_buf, buflen);
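
Lines 315-335 are mmc_queue_map_sg(), which either maps the request straight into mq->sg or, when bouncing, maps it into bounce_sg and then presents the host with a single segment covering the bounce buffer. Sketch based on mainline 2.6.36:

unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
    unsigned int sg_len;
    size_t buflen;
    struct scatterlist *sg;
    int i;

    /* no bounce buffer: map the request directly into mq->sg */
    if (!mq->bounce_buf)                            /* matched line 322 */
        return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

    BUG_ON(!mq->bounce_sg);                         /* matched line 325 */

    /* map into the bounce scatterlist, then hand the host one
     * contiguous segment over bounce_buf */
    sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

    mq->bounce_sg_len = sg_len;                     /* matched line 329 */

    buflen = 0;
    for_each_sg(mq->bounce_sg, sg, sg_len, i)       /* matched line 332 */
        buflen += sg->length;

    sg_init_one(mq->sg, mq->bounce_buf, buflen);    /* matched line 335 */

    return 1;
}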
344 void mmc_queue_bounce_pre(struct mmc_queue *mq)
348 if (!mq->bounce_buf)
351 if (rq_data_dir(mq->req) != WRITE)
355 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
356 mq->bounce_buf, mq->sg[0].length);
364 void mmc_queue_bounce_post(struct mmc_queue *mq)
368 if (!mq->bounce_buf)
371 if (rq_data_dir(mq->req) != READ)
375 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
376 mq->bounce_buf, mq->sg[0].length);
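
Lines 344-376 are the bounce copy helpers: before a write the payload is gathered from the original pages into the bounce buffer, and after a read it is scattered back out. Sketch of both based on mainline 2.6.36:

void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
    if (!mq->bounce_buf)                            /* matched line 348 */
        return;

    /* only writes need their data staged before the transfer starts */
    if (rq_data_dir(mq->req) != WRITE)              /* matched line 351 */
        return;

    sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
        mq->bounce_buf, mq->sg[0].length);          /* matched lines 355-356 */
}

void mmc_queue_bounce_post(struct mmc_queue *mq)
{
    if (!mq->bounce_buf)                            /* matched line 368 */
        return;

    /* reads are copied back to the original pages after completion */
    if (rq_data_dir(mq->req) != READ)               /* matched line 371 */
        return;

    sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
        mq->bounce_buf, mq->sg[0].length);          /* matched lines 375-376 */
}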