/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_segs: maximum segments in scatterlist @sg
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_segs;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area area;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	struct mmc_command cmd;
	int ret;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = size;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
	if (ret)
		return ret;

	return 0;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}

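/*
 * A card counts as busy while the R1 READY_FOR_DATA bit is still clear or
 * the R1 current-state field reports state 7, the "prg" (programming)
 * state defined by the MMC/SD specifications.
 */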
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			printk(KERN_INFO "%s: Warning: Host did not "
				"wait for busy state to end.\n",
				mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz bytes but at least min_sz.  If
 * memory is tight, do not exceed 1/16th of the total lowmem pages.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (max_page_cnt < min_page_cnt)
		max_page_cnt = min_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_page_cnt << PAGE_SHIFT);
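		/*
		 * Start with an order large enough for all the remaining
		 * pages and fall back to smaller orders if the allocator
		 * cannot provide that much physically contiguous memory.
		 */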
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (sz < len)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz && i) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (sz < len)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
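
/*
 * Worked example (illustrative numbers): 52428800 bytes (50 MiB) moved in
 * 2.5 seconds gives 52428800 * 10^9 / 2500000000 = 20971520 bytes/s, which
 * mmc_test_print_rate() below reports as 20971 kB/s and 20480 KiB/s.  The
 * halving loop above scales both operands equally, so the ratio survives
 * the conversion to a 32-bit divisor for do_div().
 */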

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors == 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
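
/*
 * Note that the data size is left unchanged: a multi-block request is
 * downgraded to its single-block command so the card stops after one
 * block, and a single-block request becomes SEND_STATUS, which moves no
 * data at all.  Either way the host sees fewer bytes than it asked for.
 */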

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

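		/*
		 * Read back whole sectors; if the transfer ended exactly on
		 * a sector boundary, read one extra sector so the 0xDF fill
		 * written by __mmc_test_prepare() can be verified as intact.
		 */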
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

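	/*
	 * Pick a transfer of up to two pages, clamped to what the host
	 * controller can handle in one request; anything under 1024 bytes
	 * would be a single 512-byte block and could not exercise a
	 * multi-block transfer.
	 */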
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	t->blocks = sz >> 9;

	if (max_scatter) {
		return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						   t->max_segs, &t->sg_len);
	} else {
		return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				       &t->sg_len);
	}
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
				1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The size of the area is the
 * preferred erase size, which is a good size for optimal transfer speed.  Note
 * that this is typically 4MiB for modern cards.  The test area is placed in
 * the middle of the card because cards may have different characteristics at
 * the front (for FAT file system optimization).  Optionally, the area is
 * erased (if the card supports it), which may improve write performance.
 * Optionally, the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
		t->max_sz = TEST_AREA_MAX_SIZE;
	else
		t->max_sz = (unsigned long)test->card->pref_erase << 9;
	/*
	 * Try to allocate enough memory for the whole area.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
	if (!t->mem)
		return -ENOMEM;

	t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
				write, max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};

static DEFINE_MUTEX(mmc_test_lock);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static ssize_t mmc_test_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	mutex_lock(&mmc_test_lock);
	mutex_unlock(&mmc_test_lock);

	return 0;
}

static ssize_t mmc_test_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_card *card;
	struct mmc_test_card *test;
	int testcase;

	card = container_of(dev, struct mmc_card, dev);

	testcase = simple_strtol(buf, NULL, 10);

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

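/*
 * Tests are driven from user space through the sysfs "test" attribute
 * declared below.  Writing a test number runs that single test case;
 * writing 0 runs them all in sequence.  For example (the exact device
 * path varies by host and card):
 *
 *   echo 0 > /sys/bus/mmc/devices/mmc0:0001/test
 */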
static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
		return -ENODEV;

	ret = device_create_file(&card->dev, &dev_attr_test);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	device_remove_file(&card->dev, &dev_attr_test);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");