1/*
2 *  linux/drivers/mmc/card/mmc_test.c
3 *
4 *  Copyright 2007-2008 Pierre Ossman
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
16#include <linux/slab.h>
17
18#include <linux/scatterlist.h>
19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
20#include <linux/list.h>
21
22#include <linux/debugfs.h>
23#include <linux/uaccess.h>
24#include <linux/seq_file.h>
25#include <linux/module.h>
26
27#define RESULT_OK		0
28#define RESULT_FAIL		1
29#define RESULT_UNSUP_HOST	2
30#define RESULT_UNSUP_CARD	3
31
32#define BUFFER_ORDER		2
33#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
34
35#define TEST_ALIGN_END		8
36
37/*
38 * Limit the test area size to the maximum MMC HC erase group size.  Note that
39 * the maximum SD allocation unit size is just 4MiB.
40 */
41#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
42
43/**
44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
45 * @page: first page in the allocation
46 * @order: order of the number of pages allocated
47 */
48struct mmc_test_pages {
49	struct page *page;
50	unsigned int order;
51};
52
53/**
54 * struct mmc_test_mem - allocated memory.
55 * @arr: array of allocations
56 * @cnt: number of allocations
57 */
58struct mmc_test_mem {
59	struct mmc_test_pages *arr;
60	unsigned int cnt;
61};
62
63/**
64 * struct mmc_test_area - information for performance tests.
65 * @max_sz: test area size (in bytes)
66 * @dev_addr: address on card at which to do performance tests
67 * @max_tfr: maximum transfer size allowed by driver (in bytes)
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
69 * @max_seg_sz: maximum segment size allowed by driver
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
72 * @mem: allocated memory
73 * @sg: scatterlist
74 */
75struct mmc_test_area {
76	unsigned long max_sz;
77	unsigned int dev_addr;
78	unsigned int max_tfr;
79	unsigned int max_segs;
80	unsigned int max_seg_sz;
81	unsigned int blocks;
82	unsigned int sg_len;
83	struct mmc_test_mem *mem;
84	struct scatterlist *sg;
85};
86
87/**
88 * struct mmc_test_transfer_result - transfer results for performance tests.
89 * @link: double-linked list
90 * @count: amount of group of sectors to check
91 * @sectors: amount of sectors to check in one group
92 * @ts: time values of transfer
93 * @rate: calculated transfer rate
94 * @iops: I/O operations per second (times 100)
95 */
96struct mmc_test_transfer_result {
97	struct list_head link;
98	unsigned int count;
99	unsigned int sectors;
100	struct timespec ts;
101	unsigned int rate;
102	unsigned int iops;
103};
104
105/**
106 * struct mmc_test_general_result - results for tests.
107 * @link: double-linked list
108 * @card: card under test
109 * @testcase: number of test case
110 * @result: result of test run
111 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
112 */
113struct mmc_test_general_result {
114	struct list_head link;
115	struct mmc_card *card;
116	int testcase;
117	int result;
118	struct list_head tr_lst;
119};
120
121/**
122 * struct mmc_test_dbgfs_file - debugfs related file.
123 * @link: double-linked list
124 * @card: card under test
125 * @file: file created under debugfs
126 */
127struct mmc_test_dbgfs_file {
128	struct list_head link;
129	struct mmc_card *card;
130	struct dentry *file;
131};
132
133/**
134 * struct mmc_test_card - test information.
135 * @card: card under test
136 * @scratch: transfer buffer
137 * @buffer: transfer buffer
138 * @highmem: buffer for highmem tests
139 * @area: information for performance tests
140 * @gr: pointer to results of current testcase
141 */
142struct mmc_test_card {
143	struct mmc_card	*card;
144
145	u8		scratch[BUFFER_SIZE];
146	u8		*buffer;
147#ifdef CONFIG_HIGHMEM
148	struct page	*highmem;
149#endif
150	struct mmc_test_area		area;
151	struct mmc_test_general_result	*gr;
152};
153
154enum mmc_test_prep_media {
155	MMC_TEST_PREP_NONE = 0,
156	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
157	MMC_TEST_PREP_ERASE = 1 << 1,
158};
159
160struct mmc_test_multiple_rw {
161	unsigned int *sg_len;
162	unsigned int *bs;
163	unsigned int len;
164	unsigned int size;
165	bool do_write;
166	bool do_nonblock_req;
167	enum mmc_test_prep_media prepare;
168};
169
170struct mmc_test_async_req {
171	struct mmc_async_req areq;
172	struct mmc_test_card *test;
173};
174
175/*******************************************************************/
176/*  General helper functions                                       */
177/*******************************************************************/
178
179/*
180 * Configure correct block size in card
181 */
182static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
183{
184	return mmc_set_blocklen(test->card, size);
185}
186
187/*
188 * Fill in the mmc_request structure given a set of transfer parameters.
189 */
190static void mmc_test_prepare_mrq(struct mmc_test_card *test,
191	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
192	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
193{
194	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
195
196	if (blocks > 1) {
197		mrq->cmd->opcode = write ?
198			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
199	} else {
200		mrq->cmd->opcode = write ?
201			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
202	}
203
204	mrq->cmd->arg = dev_addr;
205	if (!mmc_card_blockaddr(test->card))
206		mrq->cmd->arg <<= 9;
207
208	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
209
210	if (blocks == 1)
211		mrq->stop = NULL;
212	else {
213		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
214		mrq->stop->arg = 0;
215		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
216	}
217
218	mrq->data->blksz = blksz;
219	mrq->data->blocks = blocks;
220	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
221	mrq->data->sg = sg;
222	mrq->data->sg_len = sg_len;
223
224	mmc_set_data_timeout(mrq->data, test->card);
225}
226
227static int mmc_test_busy(struct mmc_command *cmd)
228{
229	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
230		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
231}
232
233/*
234 * Wait for the card to finish the busy state
235 */
236static int mmc_test_wait_busy(struct mmc_test_card *test)
237{
238	int ret, busy;
239	struct mmc_command cmd = {0};
240
241	busy = 0;
242	do {
243		memset(&cmd, 0, sizeof(struct mmc_command));
244
245		cmd.opcode = MMC_SEND_STATUS;
246		cmd.arg = test->card->rca << 16;
247		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
248
249		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
250		if (ret)
251			break;
252
253		if (!busy && mmc_test_busy(&cmd)) {
254			busy = 1;
255			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
256				pr_info("%s: Warning: Host did not "
257					"wait for busy state to end.\n",
258					mmc_hostname(test->card->host));
259		}
260	} while (mmc_test_busy(&cmd));
261
262	return ret;
263}
264
265/*
266 * Transfer a single sector of kernel addressable data
267 */
268static int mmc_test_buffer_transfer(struct mmc_test_card *test,
269	u8 *buffer, unsigned addr, unsigned blksz, int write)
270{
271	int ret;
272
273	struct mmc_request mrq = {0};
274	struct mmc_command cmd = {0};
275	struct mmc_command stop = {0};
276	struct mmc_data data = {0};
277
278	struct scatterlist sg;
279
280	mrq.cmd = &cmd;
281	mrq.data = &data;
282	mrq.stop = &stop;
283
284	sg_init_one(&sg, buffer, blksz);
285
286	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
287
288	mmc_wait_for_req(test->card->host, &mrq);
289
290	if (cmd.error)
291		return cmd.error;
292	if (data.error)
293		return data.error;
294
295	ret = mmc_test_wait_busy(test);
296	if (ret)
297		return ret;
298
299	return 0;
300}
301
302static void mmc_test_free_mem(struct mmc_test_mem *mem)
303{
304	if (!mem)
305		return;
306	while (mem->cnt--)
307		__free_pages(mem->arr[mem->cnt].page,
308			     mem->arr[mem->cnt].order);
309	kfree(mem->arr);
310	kfree(mem);
311}
312
313/*
314 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
315 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
316 * not exceed a maximum number of segments and try not to make segments much
317 * bigger than maximum segment size.
318 */
319static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
320					       unsigned long max_sz,
321					       unsigned int max_segs,
322					       unsigned int max_seg_sz)
323{
324	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
325	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
326	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
327	unsigned long page_cnt = 0;
328	unsigned long limit = nr_free_buffer_pages() >> 4;
329	struct mmc_test_mem *mem;
330
331	if (max_page_cnt > limit)
332		max_page_cnt = limit;
333	if (min_page_cnt > max_page_cnt)
334		min_page_cnt = max_page_cnt;
335
336	if (max_seg_page_cnt > max_page_cnt)
337		max_seg_page_cnt = max_page_cnt;
338
339	if (max_segs > max_page_cnt)
340		max_segs = max_page_cnt;
341
342	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
343	if (!mem)
344		return NULL;
345
346	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
347			   GFP_KERNEL);
348	if (!mem->arr)
349		goto out_free;
350
351	while (max_page_cnt) {
352		struct page *page;
353		unsigned int order;
354		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
355				__GFP_NORETRY;
356
357		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
358		while (1) {
359			page = alloc_pages(flags, order);
360			if (page || !order)
361				break;
362			order -= 1;
363		}
364		if (!page) {
365			if (page_cnt < min_page_cnt)
366				goto out_free;
367			break;
368		}
369		mem->arr[mem->cnt].page = page;
370		mem->arr[mem->cnt].order = order;
371		mem->cnt += 1;
372		if (max_page_cnt <= (1UL << order))
373			break;
374		max_page_cnt -= 1UL << order;
375		page_cnt += 1UL << order;
376		if (mem->cnt >= max_segs) {
377			if (page_cnt < min_page_cnt)
378				goto out_free;
379			break;
380		}
381	}
382
383	return mem;
384
385out_free:
386	mmc_test_free_mem(mem);
387	return NULL;
388}
389
390/*
391 * Map memory into a scatterlist.  Optionally allow the same memory to be
392 * mapped more than once.
393 */
394static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
395			   struct scatterlist *sglist, int repeat,
396			   unsigned int max_segs, unsigned int max_seg_sz,
397			   unsigned int *sg_len, int min_sg_len)
398{
399	struct scatterlist *sg = NULL;
400	unsigned int i;
401	unsigned long sz = size;
402
403	sg_init_table(sglist, max_segs);
404	if (min_sg_len > max_segs)
405		min_sg_len = max_segs;
406
407	*sg_len = 0;
408	do {
409		for (i = 0; i < mem->cnt; i++) {
410			unsigned long len = PAGE_SIZE << mem->arr[i].order;
411
412			if (min_sg_len && (size / min_sg_len < len))
413				len = ALIGN(size / min_sg_len, 512);
414			if (len > sz)
415				len = sz;
416			if (len > max_seg_sz)
417				len = max_seg_sz;
418			if (sg)
419				sg = sg_next(sg);
420			else
421				sg = sglist;
422			if (!sg)
423				return -EINVAL;
424			sg_set_page(sg, mem->arr[i].page, len, 0);
425			sz -= len;
426			*sg_len += 1;
427			if (!sz)
428				break;
429		}
430	} while (sz && repeat);
431
432	if (sz)
433		return -EINVAL;
434
435	if (sg)
436		sg_mark_end(sg);
437
438	return 0;
439}
440
441/*
442 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
443 * same memory to be mapped more than once.
444 */
445static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
446				       unsigned long sz,
447				       struct scatterlist *sglist,
448				       unsigned int max_segs,
449				       unsigned int max_seg_sz,
450				       unsigned int *sg_len)
451{
452	struct scatterlist *sg = NULL;
453	unsigned int i = mem->cnt, cnt;
454	unsigned long len;
455	void *base, *addr, *last_addr = NULL;
456
457	sg_init_table(sglist, max_segs);
458
459	*sg_len = 0;
460	while (sz) {
461		base = page_address(mem->arr[--i].page);
462		cnt = 1 << mem->arr[i].order;
463		while (sz && cnt) {
464			addr = base + PAGE_SIZE * --cnt;
465			if (last_addr && last_addr + PAGE_SIZE == addr)
466				continue;
467			last_addr = addr;
468			len = PAGE_SIZE;
469			if (len > max_seg_sz)
470				len = max_seg_sz;
471			if (len > sz)
472				len = sz;
473			if (sg)
474				sg = sg_next(sg);
475			else
476				sg = sglist;
477			if (!sg)
478				return -EINVAL;
479			sg_set_page(sg, virt_to_page(addr), len, 0);
480			sz -= len;
481			*sg_len += 1;
482		}
483		if (i == 0)
484			i = mem->cnt;
485	}
486
487	if (sg)
488		sg_mark_end(sg);
489
490	return 0;
491}
492
493/*
494 * Calculate transfer rate in bytes per second.
495 */
496static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
497{
498	uint64_t ns;
499
500	ns = ts->tv_sec;
501	ns *= 1000000000;
502	ns += ts->tv_nsec;
503
504	bytes *= 1000000000;
505
506	while (ns > UINT_MAX) {
507		bytes >>= 1;
508		ns >>= 1;
509	}
510
511	if (!ns)
512		return 0;
513
514	do_div(bytes, (uint32_t)ns);
515
516	return bytes;
517}
518
519/*
520 * Save transfer results for future usage
521 */
522static void mmc_test_save_transfer_result(struct mmc_test_card *test,
523	unsigned int count, unsigned int sectors, struct timespec ts,
524	unsigned int rate, unsigned int iops)
525{
526	struct mmc_test_transfer_result *tr;
527
528	if (!test->gr)
529		return;
530
531	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
532	if (!tr)
533		return;
534
535	tr->count = count;
536	tr->sectors = sectors;
537	tr->ts = ts;
538	tr->rate = rate;
539	tr->iops = iops;
540
541	list_add_tail(&tr->link, &test->gr->tr_lst);
542}
543
544/*
545 * Print the transfer rate.
546 */
547static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
548				struct timespec *ts1, struct timespec *ts2)
549{
550	unsigned int rate, iops, sectors = bytes >> 9;
551	struct timespec ts;
552
553	ts = timespec_sub(*ts2, *ts1);
554
555	rate = mmc_test_rate(bytes, &ts);
556	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
557
558	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
559			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
560			 mmc_hostname(test->card->host), sectors, sectors >> 1,
561			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
562			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
563			 iops / 100, iops % 100);
564
565	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
566}
567
568/*
569 * Print the average transfer rate.
570 */
571static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
572				    unsigned int count, struct timespec *ts1,
573				    struct timespec *ts2)
574{
575	unsigned int rate, iops, sectors = bytes >> 9;
576	uint64_t tot = bytes * count;
577	struct timespec ts;
578
579	ts = timespec_sub(*ts2, *ts1);
580
581	rate = mmc_test_rate(tot, &ts);
582	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
583
584	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
585			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
586			 "%u.%02u IOPS, sg_len %d)\n",
587			 mmc_hostname(test->card->host), count, sectors, count,
588			 sectors >> 1, (sectors & 1 ? ".5" : ""),
589			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
590			 rate / 1000, rate / 1024, iops / 100, iops % 100,
591			 test->area.sg_len);
592
593	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
594}
595
596/*
597 * Return the card size in sectors.
598 */
599static unsigned int mmc_test_capacity(struct mmc_card *card)
600{
601	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
602		return card->ext_csd.sectors;
603	else
604		return card->csd.capacity << (card->csd.read_blkbits - 9);
605}
606
607/*******************************************************************/
608/*  Test preparation and cleanup                                   */
609/*******************************************************************/
610
611/*
612 * Fill the first couple of sectors of the card with known data
613 * so that bad reads/writes can be detected
614 */
615static int __mmc_test_prepare(struct mmc_test_card *test, int write)
616{
617	int ret, i;
618
619	ret = mmc_test_set_blksize(test, 512);
620	if (ret)
621		return ret;
622
623	if (write)
624		memset(test->buffer, 0xDF, 512);
625	else {
626		for (i = 0;i < 512;i++)
627			test->buffer[i] = i;
628	}
629
630	for (i = 0;i < BUFFER_SIZE / 512;i++) {
631		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
632		if (ret)
633			return ret;
634	}
635
636	return 0;
637}
638
639static int mmc_test_prepare_write(struct mmc_test_card *test)
640{
641	return __mmc_test_prepare(test, 1);
642}
643
644static int mmc_test_prepare_read(struct mmc_test_card *test)
645{
646	return __mmc_test_prepare(test, 0);
647}
648
649static int mmc_test_cleanup(struct mmc_test_card *test)
650{
651	int ret, i;
652
653	ret = mmc_test_set_blksize(test, 512);
654	if (ret)
655		return ret;
656
657	memset(test->buffer, 0, 512);
658
659	for (i = 0;i < BUFFER_SIZE / 512;i++) {
660		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
661		if (ret)
662			return ret;
663	}
664
665	return 0;
666}
667
668/*******************************************************************/
669/*  Test execution helpers                                         */
670/*******************************************************************/
671
672/*
673 * Modifies the mmc_request to perform the "short transfer" tests
674 */
675static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
676	struct mmc_request *mrq, int write)
677{
678	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
679
680	if (mrq->data->blocks > 1) {
681		mrq->cmd->opcode = write ?
682			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
683		mrq->stop = NULL;
684	} else {
685		mrq->cmd->opcode = MMC_SEND_STATUS;
686		mrq->cmd->arg = test->card->rca << 16;
687	}
688}
689
690/*
691 * Checks that a normal transfer didn't have any errors
692 */
693static int mmc_test_check_result(struct mmc_test_card *test,
694				 struct mmc_request *mrq)
695{
696	int ret;
697
698	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
699
700	ret = 0;
701
702	if (!ret && mrq->cmd->error)
703		ret = mrq->cmd->error;
704	if (!ret && mrq->data->error)
705		ret = mrq->data->error;
706	if (!ret && mrq->stop && mrq->stop->error)
707		ret = mrq->stop->error;
708	if (!ret && mrq->data->bytes_xfered !=
709		mrq->data->blocks * mrq->data->blksz)
710		ret = RESULT_FAIL;
711
712	if (ret == -EINVAL)
713		ret = RESULT_UNSUP_HOST;
714
715	return ret;
716}
717
718static int mmc_test_check_result_async(struct mmc_card *card,
719				       struct mmc_async_req *areq)
720{
721	struct mmc_test_async_req *test_async =
722		container_of(areq, struct mmc_test_async_req, areq);
723
724	mmc_test_wait_busy(test_async->test);
725
726	return mmc_test_check_result(test_async->test, areq->mrq);
727}
728
729/*
730 * Checks that a "short transfer" behaved as expected
731 */
732static int mmc_test_check_broken_result(struct mmc_test_card *test,
733	struct mmc_request *mrq)
734{
735	int ret;
736
737	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
738
739	ret = 0;
740
741	if (!ret && mrq->cmd->error)
742		ret = mrq->cmd->error;
743	if (!ret && mrq->data->error == 0)
744		ret = RESULT_FAIL;
745	if (!ret && mrq->data->error != -ETIMEDOUT)
746		ret = mrq->data->error;
747	if (!ret && mrq->stop && mrq->stop->error)
748		ret = mrq->stop->error;
749	if (mrq->data->blocks > 1) {
750		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
751			ret = RESULT_FAIL;
752	} else {
753		if (!ret && mrq->data->bytes_xfered > 0)
754			ret = RESULT_FAIL;
755	}
756
757	if (ret == -EINVAL)
758		ret = RESULT_UNSUP_HOST;
759
760	return ret;
761}
762
763/*
764 * Tests nonblock transfer with certain parameters
765 */
766static void mmc_test_nonblock_reset(struct mmc_request *mrq,
767				    struct mmc_command *cmd,
768				    struct mmc_command *stop,
769				    struct mmc_data *data)
770{
771	memset(mrq, 0, sizeof(struct mmc_request));
772	memset(cmd, 0, sizeof(struct mmc_command));
773	memset(data, 0, sizeof(struct mmc_data));
774	memset(stop, 0, sizeof(struct mmc_command));
775
776	mrq->cmd = cmd;
777	mrq->data = data;
778	mrq->stop = stop;
779}
780static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
781				      struct scatterlist *sg, unsigned sg_len,
782				      unsigned dev_addr, unsigned blocks,
783				      unsigned blksz, int write, int count)
784{
785	struct mmc_request mrq1;
786	struct mmc_command cmd1;
787	struct mmc_command stop1;
788	struct mmc_data data1;
789
790	struct mmc_request mrq2;
791	struct mmc_command cmd2;
792	struct mmc_command stop2;
793	struct mmc_data data2;
794
795	struct mmc_test_async_req test_areq[2];
796	struct mmc_async_req *done_areq;
797	struct mmc_async_req *cur_areq = &test_areq[0].areq;
798	struct mmc_async_req *other_areq = &test_areq[1].areq;
799	int i;
800	int ret;
801
802	test_areq[0].test = test;
803	test_areq[1].test = test;
804
805	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
806	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
807
808	cur_areq->mrq = &mrq1;
809	cur_areq->err_check = mmc_test_check_result_async;
810	other_areq->mrq = &mrq2;
811	other_areq->err_check = mmc_test_check_result_async;
812
813	for (i = 0; i < count; i++) {
814		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
815				     blocks, blksz, write);
816		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
817
818		if (ret || (!done_areq && i > 0))
819			goto err;
820
821		if (done_areq) {
822			if (done_areq->mrq == &mrq2)
823				mmc_test_nonblock_reset(&mrq2, &cmd2,
824							&stop2, &data2);
825			else
826				mmc_test_nonblock_reset(&mrq1, &cmd1,
827							&stop1, &data1);
828		}
829		done_areq = cur_areq;
830		cur_areq = other_areq;
831		other_areq = done_areq;
832		dev_addr += blocks;
833	}
834
835	done_areq = mmc_start_req(test->card->host, NULL, &ret);
836
837	return ret;
838err:
839	return ret;
840}
841
842/*
843 * Tests a basic transfer with certain parameters
844 */
845static int mmc_test_simple_transfer(struct mmc_test_card *test,
846	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
847	unsigned blocks, unsigned blksz, int write)
848{
849	struct mmc_request mrq = {0};
850	struct mmc_command cmd = {0};
851	struct mmc_command stop = {0};
852	struct mmc_data data = {0};
853
854	mrq.cmd = &cmd;
855	mrq.data = &data;
856	mrq.stop = &stop;
857
858	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
859		blocks, blksz, write);
860
861	mmc_wait_for_req(test->card->host, &mrq);
862
863	mmc_test_wait_busy(test);
864
865	return mmc_test_check_result(test, &mrq);
866}
867
868/*
869 * Tests a transfer where the card will fail completely or partly
870 */
871static int mmc_test_broken_transfer(struct mmc_test_card *test,
872	unsigned blocks, unsigned blksz, int write)
873{
874	struct mmc_request mrq = {0};
875	struct mmc_command cmd = {0};
876	struct mmc_command stop = {0};
877	struct mmc_data data = {0};
878
879	struct scatterlist sg;
880
881	mrq.cmd = &cmd;
882	mrq.data = &data;
883	mrq.stop = &stop;
884
885	sg_init_one(&sg, test->buffer, blocks * blksz);
886
887	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
888	mmc_test_prepare_broken_mrq(test, &mrq, write);
889
890	mmc_wait_for_req(test->card->host, &mrq);
891
892	mmc_test_wait_busy(test);
893
894	return mmc_test_check_broken_result(test, &mrq);
895}
896
897/*
898 * Does a complete transfer test where data is also validated
899 *
900 * Note: mmc_test_prepare() must have been done before this call
901 */
902static int mmc_test_transfer(struct mmc_test_card *test,
903	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
904	unsigned blocks, unsigned blksz, int write)
905{
906	int ret, i;
907	unsigned long flags;
908
909	if (write) {
910		for (i = 0;i < blocks * blksz;i++)
911			test->scratch[i] = i;
912	} else {
913		memset(test->scratch, 0, BUFFER_SIZE);
914	}
915	local_irq_save(flags);
916	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
917	local_irq_restore(flags);
918
919	ret = mmc_test_set_blksize(test, blksz);
920	if (ret)
921		return ret;
922
923	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
924		blocks, blksz, write);
925	if (ret)
926		return ret;
927
928	if (write) {
929		int sectors;
930
931		ret = mmc_test_set_blksize(test, 512);
932		if (ret)
933			return ret;
934
935		sectors = (blocks * blksz + 511) / 512;
936		if ((sectors * 512) == (blocks * blksz))
937			sectors++;
938
939		if ((sectors * 512) > BUFFER_SIZE)
940			return -EINVAL;
941
942		memset(test->buffer, 0, sectors * 512);
943
944		for (i = 0;i < sectors;i++) {
945			ret = mmc_test_buffer_transfer(test,
946				test->buffer + i * 512,
947				dev_addr + i, 512, 0);
948			if (ret)
949				return ret;
950		}
951
952		for (i = 0;i < blocks * blksz;i++) {
953			if (test->buffer[i] != (u8)i)
954				return RESULT_FAIL;
955		}
956
957		for (;i < sectors * 512;i++) {
958			if (test->buffer[i] != 0xDF)
959				return RESULT_FAIL;
960		}
961	} else {
962		local_irq_save(flags);
963		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
964		local_irq_restore(flags);
965		for (i = 0;i < blocks * blksz;i++) {
966			if (test->scratch[i] != (u8)i)
967				return RESULT_FAIL;
968		}
969	}
970
971	return 0;
972}
973
974/*******************************************************************/
975/*  Tests                                                          */
976/*******************************************************************/
977
978struct mmc_test_case {
979	const char *name;
980
981	int (*prepare)(struct mmc_test_card *);
982	int (*run)(struct mmc_test_card *);
983	int (*cleanup)(struct mmc_test_card *);
984};
985
986static int mmc_test_basic_write(struct mmc_test_card *test)
987{
988	int ret;
989	struct scatterlist sg;
990
991	ret = mmc_test_set_blksize(test, 512);
992	if (ret)
993		return ret;
994
995	sg_init_one(&sg, test->buffer, 512);
996
997	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
998	if (ret)
999		return ret;
1000
1001	return 0;
1002}
1003
1004static int mmc_test_basic_read(struct mmc_test_card *test)
1005{
1006	int ret;
1007	struct scatterlist sg;
1008
1009	ret = mmc_test_set_blksize(test, 512);
1010	if (ret)
1011		return ret;
1012
1013	sg_init_one(&sg, test->buffer, 512);
1014
1015	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1016	if (ret)
1017		return ret;
1018
1019	return 0;
1020}
1021
1022static int mmc_test_verify_write(struct mmc_test_card *test)
1023{
1024	int ret;
1025	struct scatterlist sg;
1026
1027	sg_init_one(&sg, test->buffer, 512);
1028
1029	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1030	if (ret)
1031		return ret;
1032
1033	return 0;
1034}
1035
1036static int mmc_test_verify_read(struct mmc_test_card *test)
1037{
1038	int ret;
1039	struct scatterlist sg;
1040
1041	sg_init_one(&sg, test->buffer, 512);
1042
1043	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1044	if (ret)
1045		return ret;
1046
1047	return 0;
1048}
1049
1050static int mmc_test_multi_write(struct mmc_test_card *test)
1051{
1052	int ret;
1053	unsigned int size;
1054	struct scatterlist sg;
1055
1056	if (test->card->host->max_blk_count == 1)
1057		return RESULT_UNSUP_HOST;
1058
1059	size = PAGE_SIZE * 2;
1060	size = min(size, test->card->host->max_req_size);
1061	size = min(size, test->card->host->max_seg_size);
1062	size = min(size, test->card->host->max_blk_count * 512);
1063
1064	if (size < 1024)
1065		return RESULT_UNSUP_HOST;
1066
1067	sg_init_one(&sg, test->buffer, size);
1068
1069	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1070	if (ret)
1071		return ret;
1072
1073	return 0;
1074}
1075
1076static int mmc_test_multi_read(struct mmc_test_card *test)
1077{
1078	int ret;
1079	unsigned int size;
1080	struct scatterlist sg;
1081
1082	if (test->card->host->max_blk_count == 1)
1083		return RESULT_UNSUP_HOST;
1084
1085	size = PAGE_SIZE * 2;
1086	size = min(size, test->card->host->max_req_size);
1087	size = min(size, test->card->host->max_seg_size);
1088	size = min(size, test->card->host->max_blk_count * 512);
1089
1090	if (size < 1024)
1091		return RESULT_UNSUP_HOST;
1092
1093	sg_init_one(&sg, test->buffer, size);
1094
1095	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1096	if (ret)
1097		return ret;
1098
1099	return 0;
1100}
1101
1102static int mmc_test_pow2_write(struct mmc_test_card *test)
1103{
1104	int ret, i;
1105	struct scatterlist sg;
1106
1107	if (!test->card->csd.write_partial)
1108		return RESULT_UNSUP_CARD;
1109
1110	for (i = 1; i < 512;i <<= 1) {
1111		sg_init_one(&sg, test->buffer, i);
1112		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1113		if (ret)
1114			return ret;
1115	}
1116
1117	return 0;
1118}
1119
1120static int mmc_test_pow2_read(struct mmc_test_card *test)
1121{
1122	int ret, i;
1123	struct scatterlist sg;
1124
1125	if (!test->card->csd.read_partial)
1126		return RESULT_UNSUP_CARD;
1127
1128	for (i = 1; i < 512;i <<= 1) {
1129		sg_init_one(&sg, test->buffer, i);
1130		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1131		if (ret)
1132			return ret;
1133	}
1134
1135	return 0;
1136}
1137
1138static int mmc_test_weird_write(struct mmc_test_card *test)
1139{
1140	int ret, i;
1141	struct scatterlist sg;
1142
1143	if (!test->card->csd.write_partial)
1144		return RESULT_UNSUP_CARD;
1145
1146	for (i = 3; i < 512;i += 7) {
1147		sg_init_one(&sg, test->buffer, i);
1148		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1149		if (ret)
1150			return ret;
1151	}
1152
1153	return 0;
1154}
1155
1156static int mmc_test_weird_read(struct mmc_test_card *test)
1157{
1158	int ret, i;
1159	struct scatterlist sg;
1160
1161	if (!test->card->csd.read_partial)
1162		return RESULT_UNSUP_CARD;
1163
1164	for (i = 3; i < 512;i += 7) {
1165		sg_init_one(&sg, test->buffer, i);
1166		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1167		if (ret)
1168			return ret;
1169	}
1170
1171	return 0;
1172}
1173
1174static int mmc_test_align_write(struct mmc_test_card *test)
1175{
1176	int ret, i;
1177	struct scatterlist sg;
1178
1179	for (i = 1; i < TEST_ALIGN_END; i++) {
1180		sg_init_one(&sg, test->buffer + i, 512);
1181		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1182		if (ret)
1183			return ret;
1184	}
1185
1186	return 0;
1187}
1188
1189static int mmc_test_align_read(struct mmc_test_card *test)
1190{
1191	int ret, i;
1192	struct scatterlist sg;
1193
1194	for (i = 1; i < TEST_ALIGN_END; i++) {
1195		sg_init_one(&sg, test->buffer + i, 512);
1196		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1197		if (ret)
1198			return ret;
1199	}
1200
1201	return 0;
1202}
1203
1204static int mmc_test_align_multi_write(struct mmc_test_card *test)
1205{
1206	int ret, i;
1207	unsigned int size;
1208	struct scatterlist sg;
1209
1210	if (test->card->host->max_blk_count == 1)
1211		return RESULT_UNSUP_HOST;
1212
1213	size = PAGE_SIZE * 2;
1214	size = min(size, test->card->host->max_req_size);
1215	size = min(size, test->card->host->max_seg_size);
1216	size = min(size, test->card->host->max_blk_count * 512);
1217
1218	if (size < 1024)
1219		return RESULT_UNSUP_HOST;
1220
1221	for (i = 1; i < TEST_ALIGN_END; i++) {
1222		sg_init_one(&sg, test->buffer + i, size);
1223		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1224		if (ret)
1225			return ret;
1226	}
1227
1228	return 0;
1229}
1230
1231static int mmc_test_align_multi_read(struct mmc_test_card *test)
1232{
1233	int ret, i;
1234	unsigned int size;
1235	struct scatterlist sg;
1236
1237	if (test->card->host->max_blk_count == 1)
1238		return RESULT_UNSUP_HOST;
1239
1240	size = PAGE_SIZE * 2;
1241	size = min(size, test->card->host->max_req_size);
1242	size = min(size, test->card->host->max_seg_size);
1243	size = min(size, test->card->host->max_blk_count * 512);
1244
1245	if (size < 1024)
1246		return RESULT_UNSUP_HOST;
1247
1248	for (i = 1; i < TEST_ALIGN_END; i++) {
1249		sg_init_one(&sg, test->buffer + i, size);
1250		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1251		if (ret)
1252			return ret;
1253	}
1254
1255	return 0;
1256}
1257
1258static int mmc_test_xfersize_write(struct mmc_test_card *test)
1259{
1260	int ret;
1261
1262	ret = mmc_test_set_blksize(test, 512);
1263	if (ret)
1264		return ret;
1265
1266	ret = mmc_test_broken_transfer(test, 1, 512, 1);
1267	if (ret)
1268		return ret;
1269
1270	return 0;
1271}
1272
1273static int mmc_test_xfersize_read(struct mmc_test_card *test)
1274{
1275	int ret;
1276
1277	ret = mmc_test_set_blksize(test, 512);
1278	if (ret)
1279		return ret;
1280
1281	ret = mmc_test_broken_transfer(test, 1, 512, 0);
1282	if (ret)
1283		return ret;
1284
1285	return 0;
1286}
1287
1288static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1289{
1290	int ret;
1291
1292	if (test->card->host->max_blk_count == 1)
1293		return RESULT_UNSUP_HOST;
1294
1295	ret = mmc_test_set_blksize(test, 512);
1296	if (ret)
1297		return ret;
1298
1299	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1300	if (ret)
1301		return ret;
1302
1303	return 0;
1304}
1305
1306static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1307{
1308	int ret;
1309
1310	if (test->card->host->max_blk_count == 1)
1311		return RESULT_UNSUP_HOST;
1312
1313	ret = mmc_test_set_blksize(test, 512);
1314	if (ret)
1315		return ret;
1316
1317	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1318	if (ret)
1319		return ret;
1320
1321	return 0;
1322}
1323
1324#ifdef CONFIG_HIGHMEM
1325
1326static int mmc_test_write_high(struct mmc_test_card *test)
1327{
1328	int ret;
1329	struct scatterlist sg;
1330
1331	sg_init_table(&sg, 1);
1332	sg_set_page(&sg, test->highmem, 512, 0);
1333
1334	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1335	if (ret)
1336		return ret;
1337
1338	return 0;
1339}
1340
1341static int mmc_test_read_high(struct mmc_test_card *test)
1342{
1343	int ret;
1344	struct scatterlist sg;
1345
1346	sg_init_table(&sg, 1);
1347	sg_set_page(&sg, test->highmem, 512, 0);
1348
1349	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1350	if (ret)
1351		return ret;
1352
1353	return 0;
1354}
1355
1356static int mmc_test_multi_write_high(struct mmc_test_card *test)
1357{
1358	int ret;
1359	unsigned int size;
1360	struct scatterlist sg;
1361
1362	if (test->card->host->max_blk_count == 1)
1363		return RESULT_UNSUP_HOST;
1364
1365	size = PAGE_SIZE * 2;
1366	size = min(size, test->card->host->max_req_size);
1367	size = min(size, test->card->host->max_seg_size);
1368	size = min(size, test->card->host->max_blk_count * 512);
1369
1370	if (size < 1024)
1371		return RESULT_UNSUP_HOST;
1372
1373	sg_init_table(&sg, 1);
1374	sg_set_page(&sg, test->highmem, size, 0);
1375
1376	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1377	if (ret)
1378		return ret;
1379
1380	return 0;
1381}
1382
1383static int mmc_test_multi_read_high(struct mmc_test_card *test)
1384{
1385	int ret;
1386	unsigned int size;
1387	struct scatterlist sg;
1388
1389	if (test->card->host->max_blk_count == 1)
1390		return RESULT_UNSUP_HOST;
1391
1392	size = PAGE_SIZE * 2;
1393	size = min(size, test->card->host->max_req_size);
1394	size = min(size, test->card->host->max_seg_size);
1395	size = min(size, test->card->host->max_blk_count * 512);
1396
1397	if (size < 1024)
1398		return RESULT_UNSUP_HOST;
1399
1400	sg_init_table(&sg, 1);
1401	sg_set_page(&sg, test->highmem, size, 0);
1402
1403	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1404	if (ret)
1405		return ret;
1406
1407	return 0;
1408}
1409
1410#else
1411
1412static int mmc_test_no_highmem(struct mmc_test_card *test)
1413{
1414	pr_info("%s: Highmem not configured - test skipped\n",
1415	       mmc_hostname(test->card->host));
1416	return 0;
1417}
1418
1419#endif /* CONFIG_HIGHMEM */
1420
1421/*
1422 * Map sz bytes so that it can be transferred.
1423 */
1424static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1425			     int max_scatter, int min_sg_len)
1426{
1427	struct mmc_test_area *t = &test->area;
1428	int err;
1429
1430	t->blocks = sz >> 9;
1431
1432	if (max_scatter) {
1433		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1434						  t->max_segs, t->max_seg_sz,
1435				       &t->sg_len);
1436	} else {
1437		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1438				      t->max_seg_sz, &t->sg_len, min_sg_len);
1439	}
1440	if (err)
1441		pr_info("%s: Failed to map sg list\n",
1442		       mmc_hostname(test->card->host));
1443	return err;
1444}
1445
1446/*
1447 * Transfer bytes mapped by mmc_test_area_map().
1448 */
1449static int mmc_test_area_transfer(struct mmc_test_card *test,
1450				  unsigned int dev_addr, int write)
1451{
1452	struct mmc_test_area *t = &test->area;
1453
1454	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1455					t->blocks, 512, write);
1456}
1457
1458/*
1459 * Map and transfer bytes for multiple transfers.
1460 */
1461static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1462				unsigned int dev_addr, int write,
1463				int max_scatter, int timed, int count,
1464				bool nonblock, int min_sg_len)
1465{
1466	struct timespec ts1, ts2;
1467	int ret = 0;
1468	int i;
1469	struct mmc_test_area *t = &test->area;
1470
1471	/*
1472	 * In the case of a maximally scattered transfer, the maximum transfer
1473	 * size is further limited by using PAGE_SIZE segments.
1474	 */
1475	if (max_scatter) {
1476		struct mmc_test_area *t = &test->area;
1477		unsigned long max_tfr;
1478
1479		if (t->max_seg_sz >= PAGE_SIZE)
1480			max_tfr = t->max_segs * PAGE_SIZE;
1481		else
1482			max_tfr = t->max_segs * t->max_seg_sz;
1483		if (sz > max_tfr)
1484			sz = max_tfr;
1485	}
1486
1487	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1488	if (ret)
1489		return ret;
1490
1491	if (timed)
1492		getnstimeofday(&ts1);
1493	if (nonblock)
1494		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1495				 dev_addr, t->blocks, 512, write, count);
1496	else
1497		for (i = 0; i < count && ret == 0; i++) {
1498			ret = mmc_test_area_transfer(test, dev_addr, write);
1499			dev_addr += sz >> 9;
1500		}
1501
1502	if (ret)
1503		return ret;
1504
1505	if (timed)
1506		getnstimeofday(&ts2);
1507
1508	if (timed)
1509		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1510
1511	return 0;
1512}
1513
1514static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1515			    unsigned int dev_addr, int write, int max_scatter,
1516			    int timed)
1517{
1518	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1519				    timed, 1, false, 0);
1520}
1521
1522/*
1523 * Write the test area entirely.
1524 */
1525static int mmc_test_area_fill(struct mmc_test_card *test)
1526{
1527	struct mmc_test_area *t = &test->area;
1528
1529	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1530}
1531
1532/*
1533 * Erase the test area entirely.
1534 */
1535static int mmc_test_area_erase(struct mmc_test_card *test)
1536{
1537	struct mmc_test_area *t = &test->area;
1538
1539	if (!mmc_can_erase(test->card))
1540		return 0;
1541
1542	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1543			 MMC_ERASE_ARG);
1544}
1545
1546/*
1547 * Cleanup struct mmc_test_area.
1548 */
1549static int mmc_test_area_cleanup(struct mmc_test_card *test)
1550{
1551	struct mmc_test_area *t = &test->area;
1552
1553	kfree(t->sg);
1554	mmc_test_free_mem(t->mem);
1555
1556	return 0;
1557}
1558
1559/*
1560 * Initialize an area for testing large transfers.  The test area is set to the
1561 * middle of the card because cards may have different charateristics at the
1562 * front (for FAT file system optimization).  Optionally, the area is erased
1563 * (if the card supports it) which may improve write performance.  Optionally,
1564 * the area is filled with data for subsequent read tests.
1565 */
1566static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1567{
1568	struct mmc_test_area *t = &test->area;
1569	unsigned long min_sz = 64 * 1024, sz;
1570	int ret;
1571
1572	ret = mmc_test_set_blksize(test, 512);
1573	if (ret)
1574		return ret;
1575
1576	/* Make the test area size about 4MiB */
1577	sz = (unsigned long)test->card->pref_erase << 9;
1578	t->max_sz = sz;
1579	while (t->max_sz < 4 * 1024 * 1024)
1580		t->max_sz += sz;
1581	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1582		t->max_sz -= sz;
1583
1584	t->max_segs = test->card->host->max_segs;
1585	t->max_seg_sz = test->card->host->max_seg_size;
1586	t->max_seg_sz -= t->max_seg_sz % 512;
1587
1588	t->max_tfr = t->max_sz;
1589	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1590		t->max_tfr = test->card->host->max_blk_count << 9;
1591	if (t->max_tfr > test->card->host->max_req_size)
1592		t->max_tfr = test->card->host->max_req_size;
1593	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1594		t->max_tfr = t->max_segs * t->max_seg_sz;
1595
1596	/*
1597	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
1598	 * because the same memory can be mapped into the scatterlist more than
1599	 * once.  Also, take into account the limits imposed on scatterlist
1600	 * segments by the host driver.
1601	 */
1602	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1603				    t->max_seg_sz);
1604	if (!t->mem)
1605		return -ENOMEM;
1606
1607	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1608	if (!t->sg) {
1609		ret = -ENOMEM;
1610		goto out_free;
1611	}
1612
1613	t->dev_addr = mmc_test_capacity(test->card) / 2;
1614	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1615
1616	if (erase) {
1617		ret = mmc_test_area_erase(test);
1618		if (ret)
1619			goto out_free;
1620	}
1621
1622	if (fill) {
1623		ret = mmc_test_area_fill(test);
1624		if (ret)
1625			goto out_free;
1626	}
1627
1628	return 0;
1629
1630out_free:
1631	mmc_test_area_cleanup(test);
1632	return ret;
1633}
1634
1635/*
1636 * Prepare for large transfers.  Do not erase the test area.
1637 */
1638static int mmc_test_area_prepare(struct mmc_test_card *test)
1639{
1640	return mmc_test_area_init(test, 0, 0);
1641}
1642
1643/*
1644 * Prepare for large transfers.  Do erase the test area.
1645 */
1646static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1647{
1648	return mmc_test_area_init(test, 1, 0);
1649}
1650
1651/*
1652 * Prepare for large transfers.  Erase and fill the test area.
1653 */
1654static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1655{
1656	return mmc_test_area_init(test, 1, 1);
1657}
1658
1659/*
1660 * Test best-case performance.  Best-case performance is expected from
1661 * a single large transfer.
1662 *
1663 * An additional option (max_scatter) allows the measurement of the same
1664 * transfer but with no contiguous pages in the scatter list.  This tests
1665 * the efficiency of DMA to handle scattered pages.
1666 */
1667static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1668				     int max_scatter)
1669{
1670	struct mmc_test_area *t = &test->area;
1671
1672	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1673				max_scatter, 1);
1674}
1675
1676/*
1677 * Best-case read performance.
1678 */
1679static int mmc_test_best_read_performance(struct mmc_test_card *test)
1680{
1681	return mmc_test_best_performance(test, 0, 0);
1682}
1683
1684/*
1685 * Best-case write performance.
1686 */
1687static int mmc_test_best_write_performance(struct mmc_test_card *test)
1688{
1689	return mmc_test_best_performance(test, 1, 0);
1690}
1691
1692/*
1693 * Best-case read performance into scattered pages.
1694 */
1695static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1696{
1697	return mmc_test_best_performance(test, 0, 1);
1698}
1699
1700/*
1701 * Best-case write performance from scattered pages.
1702 */
1703static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1704{
1705	return mmc_test_best_performance(test, 1, 1);
1706}
1707
1708/*
1709 * Single read performance by transfer size.
1710 */
1711static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1712{
1713	struct mmc_test_area *t = &test->area;
1714	unsigned long sz;
1715	unsigned int dev_addr;
1716	int ret;
1717
1718	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1719		dev_addr = t->dev_addr + (sz >> 9);
1720		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1721		if (ret)
1722			return ret;
1723	}
1724	sz = t->max_tfr;
1725	dev_addr = t->dev_addr;
1726	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1727}
1728
1729/*
1730 * Single write performance by transfer size.
1731 */
1732static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1733{
1734	struct mmc_test_area *t = &test->area;
1735	unsigned long sz;
1736	unsigned int dev_addr;
1737	int ret;
1738
1739	ret = mmc_test_area_erase(test);
1740	if (ret)
1741		return ret;
1742	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1743		dev_addr = t->dev_addr + (sz >> 9);
1744		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1745		if (ret)
1746			return ret;
1747	}
1748	ret = mmc_test_area_erase(test);
1749	if (ret)
1750		return ret;
1751	sz = t->max_tfr;
1752	dev_addr = t->dev_addr;
1753	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1754}
1755
1756/*
1757 * Single trim performance by transfer size.
1758 */
1759static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1760{
1761	struct mmc_test_area *t = &test->area;
1762	unsigned long sz;
1763	unsigned int dev_addr;
1764	struct timespec ts1, ts2;
1765	int ret;
1766
1767	if (!mmc_can_trim(test->card))
1768		return RESULT_UNSUP_CARD;
1769
1770	if (!mmc_can_erase(test->card))
1771		return RESULT_UNSUP_HOST;
1772
1773	for (sz = 512; sz < t->max_sz; sz <<= 1) {
1774		dev_addr = t->dev_addr + (sz >> 9);
1775		getnstimeofday(&ts1);
1776		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1777		if (ret)
1778			return ret;
1779		getnstimeofday(&ts2);
1780		mmc_test_print_rate(test, sz, &ts1, &ts2);
1781	}
1782	dev_addr = t->dev_addr;
1783	getnstimeofday(&ts1);
1784	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1785	if (ret)
1786		return ret;
1787	getnstimeofday(&ts2);
1788	mmc_test_print_rate(test, sz, &ts1, &ts2);
1789	return 0;
1790}
1791
1792static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1793{
1794	struct mmc_test_area *t = &test->area;
1795	unsigned int dev_addr, i, cnt;
1796	struct timespec ts1, ts2;
1797	int ret;
1798
1799	cnt = t->max_sz / sz;
1800	dev_addr = t->dev_addr;
1801	getnstimeofday(&ts1);
1802	for (i = 0; i < cnt; i++) {
1803		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1804		if (ret)
1805			return ret;
1806		dev_addr += (sz >> 9);
1807	}
1808	getnstimeofday(&ts2);
1809	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1810	return 0;
1811}
1812
1813/*
1814 * Consecutive read performance by transfer size.
1815 */
1816static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1817{
1818	struct mmc_test_area *t = &test->area;
1819	unsigned long sz;
1820	int ret;
1821
1822	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1823		ret = mmc_test_seq_read_perf(test, sz);
1824		if (ret)
1825			return ret;
1826	}
1827	sz = t->max_tfr;
1828	return mmc_test_seq_read_perf(test, sz);
1829}
1830
1831static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1832{
1833	struct mmc_test_area *t = &test->area;
1834	unsigned int dev_addr, i, cnt;
1835	struct timespec ts1, ts2;
1836	int ret;
1837
1838	ret = mmc_test_area_erase(test);
1839	if (ret)
1840		return ret;
1841	cnt = t->max_sz / sz;
1842	dev_addr = t->dev_addr;
1843	getnstimeofday(&ts1);
1844	for (i = 0; i < cnt; i++) {
1845		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1846		if (ret)
1847			return ret;
1848		dev_addr += (sz >> 9);
1849	}
1850	getnstimeofday(&ts2);
1851	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1852	return 0;
1853}
1854
1855/*
1856 * Consecutive write performance by transfer size.
1857 */
1858static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1859{
1860	struct mmc_test_area *t = &test->area;
1861	unsigned long sz;
1862	int ret;
1863
1864	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1865		ret = mmc_test_seq_write_perf(test, sz);
1866		if (ret)
1867			return ret;
1868	}
1869	sz = t->max_tfr;
1870	return mmc_test_seq_write_perf(test, sz);
1871}
1872
1873/*
1874 * Consecutive trim performance by transfer size.
1875 */
1876static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1877{
1878	struct mmc_test_area *t = &test->area;
1879	unsigned long sz;
1880	unsigned int dev_addr, i, cnt;
1881	struct timespec ts1, ts2;
1882	int ret;
1883
1884	if (!mmc_can_trim(test->card))
1885		return RESULT_UNSUP_CARD;
1886
1887	if (!mmc_can_erase(test->card))
1888		return RESULT_UNSUP_HOST;
1889
1890	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1891		ret = mmc_test_area_erase(test);
1892		if (ret)
1893			return ret;
1894		ret = mmc_test_area_fill(test);
1895		if (ret)
1896			return ret;
1897		cnt = t->max_sz / sz;
1898		dev_addr = t->dev_addr;
1899		getnstimeofday(&ts1);
1900		for (i = 0; i < cnt; i++) {
1901			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1902					MMC_TRIM_ARG);
1903			if (ret)
1904				return ret;
1905			dev_addr += (sz >> 9);
1906		}
1907		getnstimeofday(&ts2);
1908		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1909	}
1910	return 0;
1911}
1912
1913static unsigned int rnd_next = 1;
1914
1915static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1916{
1917	uint64_t r;
1918
1919	rnd_next = rnd_next * 1103515245 + 12345;
1920	r = (rnd_next >> 16) & 0x7fff;
1921	return (r * rnd_cnt) >> 15;
1922}
1923
1924static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1925			     unsigned long sz)
1926{
1927	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1928	unsigned int ssz;
1929	struct timespec ts1, ts2, ts;
1930	int ret;
1931
1932	ssz = sz >> 9;
1933
1934	rnd_addr = mmc_test_capacity(test->card) / 4;
1935	range1 = rnd_addr / test->card->pref_erase;
1936	range2 = range1 / ssz;
1937
1938	getnstimeofday(&ts1);
1939	for (cnt = 0; cnt < UINT_MAX; cnt++) {
1940		getnstimeofday(&ts2);
1941		ts = timespec_sub(ts2, ts1);
1942		if (ts.tv_sec >= 10)
1943			break;
1944		ea = mmc_test_rnd_num(range1);
1945		if (ea == last_ea)
1946			ea -= 1;
1947		last_ea = ea;
1948		dev_addr = rnd_addr + test->card->pref_erase * ea +
1949			   ssz * mmc_test_rnd_num(range2);
1950		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1951		if (ret)
1952			return ret;
1953	}
1954	if (print)
1955		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1956	return 0;
1957}
1958
1959static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1960{
1961	struct mmc_test_area *t = &test->area;
1962	unsigned int next;
1963	unsigned long sz;
1964	int ret;
1965
1966	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1967		/*
1968		 * When writing, try to get more consistent results by running
1969		 * the test twice with exactly the same I/O but outputting the
1970		 * results only for the 2nd run.
1971		 */
1972		if (write) {
1973			next = rnd_next;
1974			ret = mmc_test_rnd_perf(test, write, 0, sz);
1975			if (ret)
1976				return ret;
1977			rnd_next = next;
1978		}
1979		ret = mmc_test_rnd_perf(test, write, 1, sz);
1980		if (ret)
1981			return ret;
1982	}
1983	sz = t->max_tfr;
1984	if (write) {
1985		next = rnd_next;
1986		ret = mmc_test_rnd_perf(test, write, 0, sz);
1987		if (ret)
1988			return ret;
1989		rnd_next = next;
1990	}
1991	return mmc_test_rnd_perf(test, write, 1, sz);
1992}
1993
1994/*
1995 * Random read performance by transfer size.
1996 */
1997static int mmc_test_random_read_perf(struct mmc_test_card *test)
1998{
1999	return mmc_test_random_perf(test, 0);
2000}
2001
2002/*
2003 * Random write performance by transfer size.
2004 */
2005static int mmc_test_random_write_perf(struct mmc_test_card *test)
2006{
2007	return mmc_test_random_perf(test, 1);
2008}
2009
2010static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2011			     unsigned int tot_sz, int max_scatter)
2012{
2013	struct mmc_test_area *t = &test->area;
2014	unsigned int dev_addr, i, cnt, sz, ssz;
2015	struct timespec ts1, ts2;
2016	int ret;
2017
2018	sz = t->max_tfr;
2019
2020	/*
2021	 * In the case of a maximally scattered transfer, the maximum transfer
2022	 * size is further limited by using PAGE_SIZE segments.
2023	 */
2024	if (max_scatter) {
2025		unsigned long max_tfr;
2026
2027		if (t->max_seg_sz >= PAGE_SIZE)
2028			max_tfr = t->max_segs * PAGE_SIZE;
2029		else
2030			max_tfr = t->max_segs * t->max_seg_sz;
2031		if (sz > max_tfr)
2032			sz = max_tfr;
2033	}
2034
2035	ssz = sz >> 9;
2036	dev_addr = mmc_test_capacity(test->card) / 4;
2037	if (tot_sz > dev_addr << 9)
2038		tot_sz = dev_addr << 9;
2039	cnt = tot_sz / sz;
2040	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2041
2042	getnstimeofday(&ts1);
2043	for (i = 0; i < cnt; i++) {
2044		ret = mmc_test_area_io(test, sz, dev_addr, write,
2045				       max_scatter, 0);
2046		if (ret)
2047			return ret;
2048		dev_addr += ssz;
2049	}
2050	getnstimeofday(&ts2);
2051
2052	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2053
2054	return 0;
2055}
2056
2057static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2058{
2059	int ret, i;
2060
2061	for (i = 0; i < 10; i++) {
2062		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2063		if (ret)
2064			return ret;
2065	}
2066	for (i = 0; i < 5; i++) {
2067		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2068		if (ret)
2069			return ret;
2070	}
2071	for (i = 0; i < 3; i++) {
2072		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2073		if (ret)
2074			return ret;
2075	}
2076
2077	return ret;
2078}
2079
2080/*
2081 * Large sequential read performance.
2082 */
2083static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2084{
2085	return mmc_test_large_seq_perf(test, 0);
2086}
2087
2088/*
2089 * Large sequential write performance.
2090 */
2091static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2092{
2093	return mmc_test_large_seq_perf(test, 1);
2094}
2095
2096static int mmc_test_rw_multiple(struct mmc_test_card *test,
2097				struct mmc_test_multiple_rw *tdata,
2098				unsigned int reqsize, unsigned int size,
2099				int min_sg_len)
2100{
2101	unsigned int dev_addr;
2102	struct mmc_test_area *t = &test->area;
2103	int ret = 0;
2104
2105	/* Set up test area */
2106	if (size > mmc_test_capacity(test->card) / 2 * 512)
2107		size = mmc_test_capacity(test->card) / 2 * 512;
2108	if (reqsize > t->max_tfr)
2109		reqsize = t->max_tfr;
2110	dev_addr = mmc_test_capacity(test->card) / 4;
2111	if ((dev_addr & 0xffff0000))
2112		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2113	else
2114		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2115	if (!dev_addr)
2116		goto err;
2117
2118	if (reqsize > size)
2119		return 0;
2120
2121	/* prepare test area */
2122	if (mmc_can_erase(test->card) &&
2123	    tdata->prepare & MMC_TEST_PREP_ERASE) {
2124		ret = mmc_erase(test->card, dev_addr,
2125				size / 512, MMC_SECURE_ERASE_ARG);
2126		if (ret)
2127			ret = mmc_erase(test->card, dev_addr,
2128					size / 512, MMC_ERASE_ARG);
2129		if (ret)
2130			goto err;
2131	}
2132
2133	/* Run test */
2134	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2135				   tdata->do_write, 0, 1, size / reqsize,
2136				   tdata->do_nonblock_req, min_sg_len);
2137	if (ret)
2138		goto err;
2139
2140	return ret;
2141 err:
2142	pr_info("[%s] error\n", __func__);
2143	return ret;
2144}
2145
2146static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2147				     struct mmc_test_multiple_rw *rw)
2148{
2149	int ret = 0;
2150	int i;
2151	void *pre_req = test->card->host->ops->pre_req;
2152	void *post_req = test->card->host->ops->post_req;
2153
2154	if (rw->do_nonblock_req &&
2155	    ((!pre_req && post_req) || (pre_req && !post_req))) {
2156		pr_info("error: only one of pre/post is defined\n");
2157		return -EINVAL;
2158	}
2159
2160	for (i = 0 ; i < rw->len && ret == 0; i++) {
2161		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2162		if (ret)
2163			break;
2164	}
2165	return ret;
2166}
2167
2168static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2169				       struct mmc_test_multiple_rw *rw)
2170{
2171	int ret = 0;
2172	int i;
2173
2174	for (i = 0 ; i < rw->len && ret == 0; i++) {
2175		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2176					   rw->sg_len[i]);
2177		if (ret)
2178			break;
2179	}
2180	return ret;
2181}
2182
2183/*
2184 * Multiple blocking write 4k to 4 MB chunks
2185 */
2186static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2187{
2188	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2189			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2190	struct mmc_test_multiple_rw test_data = {
2191		.bs = bs,
2192		.size = TEST_AREA_MAX_SIZE,
2193		.len = ARRAY_SIZE(bs),
2194		.do_write = true,
2195		.do_nonblock_req = false,
2196		.prepare = MMC_TEST_PREP_ERASE,
2197	};
2198
2199	return mmc_test_rw_multiple_size(test, &test_data);
2200};
2201
2202/*
2203 * Multiple non-blocking write 4k to 4 MB chunks
2204 */
2205static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2206{
2207	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2208			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2209	struct mmc_test_multiple_rw test_data = {
2210		.bs = bs,
2211		.size = TEST_AREA_MAX_SIZE,
2212		.len = ARRAY_SIZE(bs),
2213		.do_write = true,
2214		.do_nonblock_req = true,
2215		.prepare = MMC_TEST_PREP_ERASE,
2216	};
2217
2218	return mmc_test_rw_multiple_size(test, &test_data);
2219}
2220
2221/*
2222 * Multiple blocking read 4k to 4 MB chunks
2223 */
2224static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2225{
2226	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2227			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2228	struct mmc_test_multiple_rw test_data = {
2229		.bs = bs,
2230		.size = TEST_AREA_MAX_SIZE,
2231		.len = ARRAY_SIZE(bs),
2232		.do_write = false,
2233		.do_nonblock_req = false,
2234		.prepare = MMC_TEST_PREP_NONE,
2235	};
2236
2237	return mmc_test_rw_multiple_size(test, &test_data);
2238}
2239
2240/*
2241 * Multiple non-blocking read 4k to 4 MB chunks
2242 */
2243static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2244{
2245	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2246			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2247	struct mmc_test_multiple_rw test_data = {
2248		.bs = bs,
2249		.size = TEST_AREA_MAX_SIZE,
2250		.len = ARRAY_SIZE(bs),
2251		.do_write = false,
2252		.do_nonblock_req = true,
2253		.prepare = MMC_TEST_PREP_NONE,
2254	};
2255
2256	return mmc_test_rw_multiple_size(test, &test_data);
2257}
2258
2259/*
2260 * Multiple blocking write 1 to 512 sg elements
2261 */
2262static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2263{
2264	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2265				 1 << 7, 1 << 8, 1 << 9};
2266	struct mmc_test_multiple_rw test_data = {
2267		.sg_len = sg_len,
2268		.size = TEST_AREA_MAX_SIZE,
2269		.len = ARRAY_SIZE(sg_len),
2270		.do_write = true,
2271		.do_nonblock_req = false,
2272		.prepare = MMC_TEST_PREP_ERASE,
2273	};
2274
2275	return mmc_test_rw_multiple_sg_len(test, &test_data);
2276};
2277
2278/*
2279 * Multiple non-blocking write 1 to 512 sg elements
2280 */
2281static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2282{
2283	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2284				 1 << 7, 1 << 8, 1 << 9};
2285	struct mmc_test_multiple_rw test_data = {
2286		.sg_len = sg_len,
2287		.size = TEST_AREA_MAX_SIZE,
2288		.len = ARRAY_SIZE(sg_len),
2289		.do_write = true,
2290		.do_nonblock_req = true,
2291		.prepare = MMC_TEST_PREP_ERASE,
2292	};
2293
2294	return mmc_test_rw_multiple_sg_len(test, &test_data);
2295}
2296
2297/*
2298 * Multiple blocking read 1 to 512 sg elements
2299 */
2300static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2301{
2302	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2303				 1 << 7, 1 << 8, 1 << 9};
2304	struct mmc_test_multiple_rw test_data = {
2305		.sg_len = sg_len,
2306		.size = TEST_AREA_MAX_SIZE,
2307		.len = ARRAY_SIZE(sg_len),
2308		.do_write = false,
2309		.do_nonblock_req = false,
2310		.prepare = MMC_TEST_PREP_NONE,
2311	};
2312
2313	return mmc_test_rw_multiple_sg_len(test, &test_data);
2314}
2315
2316/*
2317 * Multiple non-blocking read 1 to 512 sg elements
2318 */
2319static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2320{
2321	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2322				 1 << 7, 1 << 8, 1 << 9};
2323	struct mmc_test_multiple_rw test_data = {
2324		.sg_len = sg_len,
2325		.size = TEST_AREA_MAX_SIZE,
2326		.len = ARRAY_SIZE(sg_len),
2327		.do_write = false,
2328		.do_nonblock_req = true,
2329		.prepare = MMC_TEST_PREP_NONE,
2330	};
2331
2332	return mmc_test_rw_multiple_sg_len(test, &test_data);
2333}
2334
2335/*
2336 * eMMC hardware reset.
2337 */
2338static int mmc_test_hw_reset(struct mmc_test_card *test)
2339{
2340	struct mmc_card *card = test->card;
2341	struct mmc_host *host = card->host;
2342	int err;
2343
2344	if (!mmc_card_mmc(card) || !mmc_can_reset(card))
2345		return RESULT_UNSUP_CARD;
2346
2347	err = mmc_hw_reset(host);
2348	if (!err)
2349		return RESULT_OK;
2350	else if (err == -EOPNOTSUPP)
2351		return RESULT_UNSUP_HOST;
2352
2353	return RESULT_FAIL;
2354}
2355
2356static const struct mmc_test_case mmc_test_cases[] = {
2357	{
2358		.name = "Basic write (no data verification)",
2359		.run = mmc_test_basic_write,
2360	},
2361
2362	{
2363		.name = "Basic read (no data verification)",
2364		.run = mmc_test_basic_read,
2365	},
2366
2367	{
2368		.name = "Basic write (with data verification)",
2369		.prepare = mmc_test_prepare_write,
2370		.run = mmc_test_verify_write,
2371		.cleanup = mmc_test_cleanup,
2372	},
2373
2374	{
2375		.name = "Basic read (with data verification)",
2376		.prepare = mmc_test_prepare_read,
2377		.run = mmc_test_verify_read,
2378		.cleanup = mmc_test_cleanup,
2379	},
2380
2381	{
2382		.name = "Multi-block write",
2383		.prepare = mmc_test_prepare_write,
2384		.run = mmc_test_multi_write,
2385		.cleanup = mmc_test_cleanup,
2386	},
2387
2388	{
2389		.name = "Multi-block read",
2390		.prepare = mmc_test_prepare_read,
2391		.run = mmc_test_multi_read,
2392		.cleanup = mmc_test_cleanup,
2393	},
2394
2395	{
2396		.name = "Power of two block writes",
2397		.prepare = mmc_test_prepare_write,
2398		.run = mmc_test_pow2_write,
2399		.cleanup = mmc_test_cleanup,
2400	},
2401
2402	{
2403		.name = "Power of two block reads",
2404		.prepare = mmc_test_prepare_read,
2405		.run = mmc_test_pow2_read,
2406		.cleanup = mmc_test_cleanup,
2407	},
2408
2409	{
2410		.name = "Weird sized block writes",
2411		.prepare = mmc_test_prepare_write,
2412		.run = mmc_test_weird_write,
2413		.cleanup = mmc_test_cleanup,
2414	},
2415
2416	{
2417		.name = "Weird sized block reads",
2418		.prepare = mmc_test_prepare_read,
2419		.run = mmc_test_weird_read,
2420		.cleanup = mmc_test_cleanup,
2421	},
2422
2423	{
2424		.name = "Badly aligned write",
2425		.prepare = mmc_test_prepare_write,
2426		.run = mmc_test_align_write,
2427		.cleanup = mmc_test_cleanup,
2428	},
2429
2430	{
2431		.name = "Badly aligned read",
2432		.prepare = mmc_test_prepare_read,
2433		.run = mmc_test_align_read,
2434		.cleanup = mmc_test_cleanup,
2435	},
2436
2437	{
2438		.name = "Badly aligned multi-block write",
2439		.prepare = mmc_test_prepare_write,
2440		.run = mmc_test_align_multi_write,
2441		.cleanup = mmc_test_cleanup,
2442	},
2443
2444	{
2445		.name = "Badly aligned multi-block read",
2446		.prepare = mmc_test_prepare_read,
2447		.run = mmc_test_align_multi_read,
2448		.cleanup = mmc_test_cleanup,
2449	},
2450
2451	{
2452		.name = "Correct xfer_size at write (start failure)",
2453		.run = mmc_test_xfersize_write,
2454	},
2455
2456	{
2457		.name = "Correct xfer_size at read (start failure)",
2458		.run = mmc_test_xfersize_read,
2459	},
2460
2461	{
2462		.name = "Correct xfer_size at write (midway failure)",
2463		.run = mmc_test_multi_xfersize_write,
2464	},
2465
2466	{
2467		.name = "Correct xfer_size at read (midway failure)",
2468		.run = mmc_test_multi_xfersize_read,
2469	},
2470
2471#ifdef CONFIG_HIGHMEM
2472
2473	{
2474		.name = "Highmem write",
2475		.prepare = mmc_test_prepare_write,
2476		.run = mmc_test_write_high,
2477		.cleanup = mmc_test_cleanup,
2478	},
2479
2480	{
2481		.name = "Highmem read",
2482		.prepare = mmc_test_prepare_read,
2483		.run = mmc_test_read_high,
2484		.cleanup = mmc_test_cleanup,
2485	},
2486
2487	{
2488		.name = "Multi-block highmem write",
2489		.prepare = mmc_test_prepare_write,
2490		.run = mmc_test_multi_write_high,
2491		.cleanup = mmc_test_cleanup,
2492	},
2493
2494	{
2495		.name = "Multi-block highmem read",
2496		.prepare = mmc_test_prepare_read,
2497		.run = mmc_test_multi_read_high,
2498		.cleanup = mmc_test_cleanup,
2499	},
2500
2501#else
2502
2503	{
2504		.name = "Highmem write",
2505		.run = mmc_test_no_highmem,
2506	},
2507
2508	{
2509		.name = "Highmem read",
2510		.run = mmc_test_no_highmem,
2511	},
2512
2513	{
2514		.name = "Multi-block highmem write",
2515		.run = mmc_test_no_highmem,
2516	},
2517
2518	{
2519		.name = "Multi-block highmem read",
2520		.run = mmc_test_no_highmem,
2521	},
2522
2523#endif /* CONFIG_HIGHMEM */
2524
2525	{
2526		.name = "Best-case read performance",
2527		.prepare = mmc_test_area_prepare_fill,
2528		.run = mmc_test_best_read_performance,
2529		.cleanup = mmc_test_area_cleanup,
2530	},
2531
2532	{
2533		.name = "Best-case write performance",
2534		.prepare = mmc_test_area_prepare_erase,
2535		.run = mmc_test_best_write_performance,
2536		.cleanup = mmc_test_area_cleanup,
2537	},
2538
2539	{
2540		.name = "Best-case read performance into scattered pages",
2541		.prepare = mmc_test_area_prepare_fill,
2542		.run = mmc_test_best_read_perf_max_scatter,
2543		.cleanup = mmc_test_area_cleanup,
2544	},
2545
2546	{
2547		.name = "Best-case write performance from scattered pages",
2548		.prepare = mmc_test_area_prepare_erase,
2549		.run = mmc_test_best_write_perf_max_scatter,
2550		.cleanup = mmc_test_area_cleanup,
2551	},
2552
2553	{
2554		.name = "Single read performance by transfer size",
2555		.prepare = mmc_test_area_prepare_fill,
2556		.run = mmc_test_profile_read_perf,
2557		.cleanup = mmc_test_area_cleanup,
2558	},
2559
2560	{
2561		.name = "Single write performance by transfer size",
2562		.prepare = mmc_test_area_prepare,
2563		.run = mmc_test_profile_write_perf,
2564		.cleanup = mmc_test_area_cleanup,
2565	},
2566
2567	{
2568		.name = "Single trim performance by transfer size",
2569		.prepare = mmc_test_area_prepare_fill,
2570		.run = mmc_test_profile_trim_perf,
2571		.cleanup = mmc_test_area_cleanup,
2572	},
2573
2574	{
2575		.name = "Consecutive read performance by transfer size",
2576		.prepare = mmc_test_area_prepare_fill,
2577		.run = mmc_test_profile_seq_read_perf,
2578		.cleanup = mmc_test_area_cleanup,
2579	},
2580
2581	{
2582		.name = "Consecutive write performance by transfer size",
2583		.prepare = mmc_test_area_prepare,
2584		.run = mmc_test_profile_seq_write_perf,
2585		.cleanup = mmc_test_area_cleanup,
2586	},
2587
2588	{
2589		.name = "Consecutive trim performance by transfer size",
2590		.prepare = mmc_test_area_prepare,
2591		.run = mmc_test_profile_seq_trim_perf,
2592		.cleanup = mmc_test_area_cleanup,
2593	},
2594
2595	{
2596		.name = "Random read performance by transfer size",
2597		.prepare = mmc_test_area_prepare,
2598		.run = mmc_test_random_read_perf,
2599		.cleanup = mmc_test_area_cleanup,
2600	},
2601
2602	{
2603		.name = "Random write performance by transfer size",
2604		.prepare = mmc_test_area_prepare,
2605		.run = mmc_test_random_write_perf,
2606		.cleanup = mmc_test_area_cleanup,
2607	},
2608
2609	{
2610		.name = "Large sequential read into scattered pages",
2611		.prepare = mmc_test_area_prepare,
2612		.run = mmc_test_large_seq_read_perf,
2613		.cleanup = mmc_test_area_cleanup,
2614	},
2615
2616	{
2617		.name = "Large sequential write from scattered pages",
2618		.prepare = mmc_test_area_prepare,
2619		.run = mmc_test_large_seq_write_perf,
2620		.cleanup = mmc_test_area_cleanup,
2621	},
2622
2623	{
2624		.name = "Write performance with blocking req 4k to 4MB",
2625		.prepare = mmc_test_area_prepare,
2626		.run = mmc_test_profile_mult_write_blocking_perf,
2627		.cleanup = mmc_test_area_cleanup,
2628	},
2629
2630	{
2631		.name = "Write performance with non-blocking req 4k to 4MB",
2632		.prepare = mmc_test_area_prepare,
2633		.run = mmc_test_profile_mult_write_nonblock_perf,
2634		.cleanup = mmc_test_area_cleanup,
2635	},
2636
2637	{
2638		.name = "Read performance with blocking req 4k to 4MB",
2639		.prepare = mmc_test_area_prepare,
2640		.run = mmc_test_profile_mult_read_blocking_perf,
2641		.cleanup = mmc_test_area_cleanup,
2642	},
2643
2644	{
2645		.name = "Read performance with non-blocking req 4k to 4MB",
2646		.prepare = mmc_test_area_prepare,
2647		.run = mmc_test_profile_mult_read_nonblock_perf,
2648		.cleanup = mmc_test_area_cleanup,
2649	},
2650
2651	{
2652		.name = "Write performance blocking req 1 to 512 sg elems",
2653		.prepare = mmc_test_area_prepare,
2654		.run = mmc_test_profile_sglen_wr_blocking_perf,
2655		.cleanup = mmc_test_area_cleanup,
2656	},
2657
2658	{
2659		.name = "Write performance non-blocking req 1 to 512 sg elems",
2660		.prepare = mmc_test_area_prepare,
2661		.run = mmc_test_profile_sglen_wr_nonblock_perf,
2662		.cleanup = mmc_test_area_cleanup,
2663	},
2664
2665	{
2666		.name = "Read performance blocking req 1 to 512 sg elems",
2667		.prepare = mmc_test_area_prepare,
2668		.run = mmc_test_profile_sglen_r_blocking_perf,
2669		.cleanup = mmc_test_area_cleanup,
2670	},
2671
2672	{
2673		.name = "Read performance non-blocking req 1 to 512 sg elems",
2674		.prepare = mmc_test_area_prepare,
2675		.run = mmc_test_profile_sglen_r_nonblock_perf,
2676		.cleanup = mmc_test_area_cleanup,
2677	},
2678
2679	{
2680		.name = "eMMC hardware reset",
2681		.run = mmc_test_hw_reset,
2682	},
2683};
2684
2685static DEFINE_MUTEX(mmc_test_lock);
2686
2687static LIST_HEAD(mmc_test_result);
2688
2689static void mmc_test_run(struct mmc_test_card *test, int testcase)
2690{
2691	int i, ret;
2692
2693	pr_info("%s: Starting tests of card %s...\n",
2694		mmc_hostname(test->card->host), mmc_card_id(test->card));
2695
2696	mmc_claim_host(test->card->host);
2697
2698	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2699		struct mmc_test_general_result *gr;
2700
2701		if (testcase && ((i + 1) != testcase))
2702			continue;
2703
2704		pr_info("%s: Test case %d. %s...\n",
2705			mmc_hostname(test->card->host), i + 1,
2706			mmc_test_cases[i].name);
2707
2708		if (mmc_test_cases[i].prepare) {
2709			ret = mmc_test_cases[i].prepare(test);
2710			if (ret) {
2711				pr_info("%s: Result: Prepare "
2712					"stage failed! (%d)\n",
2713					mmc_hostname(test->card->host),
2714					ret);
2715				continue;
2716			}
2717		}
2718
2719		gr = kzalloc(sizeof(struct mmc_test_general_result),
2720			GFP_KERNEL);
2721		if (gr) {
2722			INIT_LIST_HEAD(&gr->tr_lst);
2723
2724			/* Assign data what we know already */
2725			gr->card = test->card;
2726			gr->testcase = i;
2727
2728			/* Append container to global one */
2729			list_add_tail(&gr->link, &mmc_test_result);
2730
2731			/*
2732			 * Save the pointer to created container in our private
2733			 * structure.
2734			 */
2735			test->gr = gr;
2736		}
2737
2738		ret = mmc_test_cases[i].run(test);
2739		switch (ret) {
2740		case RESULT_OK:
2741			pr_info("%s: Result: OK\n",
2742				mmc_hostname(test->card->host));
2743			break;
2744		case RESULT_FAIL:
2745			pr_info("%s: Result: FAILED\n",
2746				mmc_hostname(test->card->host));
2747			break;
2748		case RESULT_UNSUP_HOST:
2749			pr_info("%s: Result: UNSUPPORTED "
2750				"(by host)\n",
2751				mmc_hostname(test->card->host));
2752			break;
2753		case RESULT_UNSUP_CARD:
2754			pr_info("%s: Result: UNSUPPORTED "
2755				"(by card)\n",
2756				mmc_hostname(test->card->host));
2757			break;
2758		default:
2759			pr_info("%s: Result: ERROR (%d)\n",
2760				mmc_hostname(test->card->host), ret);
2761		}
2762
2763		/* Save the result */
2764		if (gr)
2765			gr->result = ret;
2766
2767		if (mmc_test_cases[i].cleanup) {
2768			ret = mmc_test_cases[i].cleanup(test);
2769			if (ret) {
2770				pr_info("%s: Warning: Cleanup "
2771					"stage failed! (%d)\n",
2772					mmc_hostname(test->card->host),
2773					ret);
2774			}
2775		}
2776	}
2777
2778	mmc_release_host(test->card->host);
2779
2780	pr_info("%s: Tests completed.\n",
2781		mmc_hostname(test->card->host));
2782}
2783
2784static void mmc_test_free_result(struct mmc_card *card)
2785{
2786	struct mmc_test_general_result *gr, *grs;
2787
2788	mutex_lock(&mmc_test_lock);
2789
2790	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2791		struct mmc_test_transfer_result *tr, *trs;
2792
2793		if (card && gr->card != card)
2794			continue;
2795
2796		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2797			list_del(&tr->link);
2798			kfree(tr);
2799		}
2800
2801		list_del(&gr->link);
2802		kfree(gr);
2803	}
2804
2805	mutex_unlock(&mmc_test_lock);
2806}
2807
2808static LIST_HEAD(mmc_test_file_test);
2809
2810static int mtf_test_show(struct seq_file *sf, void *data)
2811{
2812	struct mmc_card *card = (struct mmc_card *)sf->private;
2813	struct mmc_test_general_result *gr;
2814
2815	mutex_lock(&mmc_test_lock);
2816
2817	list_for_each_entry(gr, &mmc_test_result, link) {
2818		struct mmc_test_transfer_result *tr;
2819
2820		if (gr->card != card)
2821			continue;
2822
2823		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2824
2825		list_for_each_entry(tr, &gr->tr_lst, link) {
2826			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2827				tr->count, tr->sectors,
2828				(unsigned long)tr->ts.tv_sec,
2829				(unsigned long)tr->ts.tv_nsec,
2830				tr->rate, tr->iops / 100, tr->iops % 100);
2831		}
2832	}
2833
2834	mutex_unlock(&mmc_test_lock);
2835
2836	return 0;
2837}
2838
2839static int mtf_test_open(struct inode *inode, struct file *file)
2840{
2841	return single_open(file, mtf_test_show, inode->i_private);
2842}
2843
2844static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2845	size_t count, loff_t *pos)
2846{
2847	struct seq_file *sf = (struct seq_file *)file->private_data;
2848	struct mmc_card *card = (struct mmc_card *)sf->private;
2849	struct mmc_test_card *test;
2850	long testcase;
2851	int ret;
2852
2853	ret = kstrtol_from_user(buf, count, 10, &testcase);
2854	if (ret)
2855		return ret;
2856
2857	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2858	if (!test)
2859		return -ENOMEM;
2860
2861	/*
2862	 * Remove all test cases associated with given card. Thus we have only
2863	 * actual data of the last run.
2864	 */
2865	mmc_test_free_result(card);
2866
2867	test->card = card;
2868
2869	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2870#ifdef CONFIG_HIGHMEM
2871	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2872#endif
2873
2874#ifdef CONFIG_HIGHMEM
2875	if (test->buffer && test->highmem) {
2876#else
2877	if (test->buffer) {
2878#endif
2879		mutex_lock(&mmc_test_lock);
2880		mmc_test_run(test, testcase);
2881		mutex_unlock(&mmc_test_lock);
2882	}
2883
2884#ifdef CONFIG_HIGHMEM
2885	__free_pages(test->highmem, BUFFER_ORDER);
2886#endif
2887	kfree(test->buffer);
2888	kfree(test);
2889
2890	return count;
2891}
2892
2893static const struct file_operations mmc_test_fops_test = {
2894	.open		= mtf_test_open,
2895	.read		= seq_read,
2896	.write		= mtf_test_write,
2897	.llseek		= seq_lseek,
2898	.release	= single_release,
2899};
2900
2901static int mtf_testlist_show(struct seq_file *sf, void *data)
2902{
2903	int i;
2904
2905	mutex_lock(&mmc_test_lock);
2906
2907	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2908		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2909
2910	mutex_unlock(&mmc_test_lock);
2911
2912	return 0;
2913}
2914
2915static int mtf_testlist_open(struct inode *inode, struct file *file)
2916{
2917	return single_open(file, mtf_testlist_show, inode->i_private);
2918}
2919
2920static const struct file_operations mmc_test_fops_testlist = {
2921	.open		= mtf_testlist_open,
2922	.read		= seq_read,
2923	.llseek		= seq_lseek,
2924	.release	= single_release,
2925};
2926
2927static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2928{
2929	struct mmc_test_dbgfs_file *df, *dfs;
2930
2931	mutex_lock(&mmc_test_lock);
2932
2933	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2934		if (card && df->card != card)
2935			continue;
2936		debugfs_remove(df->file);
2937		list_del(&df->link);
2938		kfree(df);
2939	}
2940
2941	mutex_unlock(&mmc_test_lock);
2942}
2943
2944static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2945	const char *name, umode_t mode, const struct file_operations *fops)
2946{
2947	struct dentry *file = NULL;
2948	struct mmc_test_dbgfs_file *df;
2949
2950	if (card->debugfs_root)
2951		file = debugfs_create_file(name, mode, card->debugfs_root,
2952			card, fops);
2953
2954	if (IS_ERR_OR_NULL(file)) {
2955		dev_err(&card->dev,
2956			"Can't create %s. Perhaps debugfs is disabled.\n",
2957			name);
2958		return -ENODEV;
2959	}
2960
2961	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2962	if (!df) {
2963		debugfs_remove(file);
2964		dev_err(&card->dev,
2965			"Can't allocate memory for internal usage.\n");
2966		return -ENOMEM;
2967	}
2968
2969	df->card = card;
2970	df->file = file;
2971
2972	list_add(&df->link, &mmc_test_file_test);
2973	return 0;
2974}
2975
2976static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2977{
2978	int ret;
2979
2980	mutex_lock(&mmc_test_lock);
2981
2982	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2983		&mmc_test_fops_test);
2984	if (ret)
2985		goto err;
2986
2987	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2988		&mmc_test_fops_testlist);
2989	if (ret)
2990		goto err;
2991
2992err:
2993	mutex_unlock(&mmc_test_lock);
2994
2995	return ret;
2996}
2997
2998static int mmc_test_probe(struct mmc_card *card)
2999{
3000	int ret;
3001
3002	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3003		return -ENODEV;
3004
3005	ret = mmc_test_register_dbgfs_file(card);
3006	if (ret)
3007		return ret;
3008
3009	dev_info(&card->dev, "Card claimed for testing.\n");
3010
3011	return 0;
3012}
3013
3014static void mmc_test_remove(struct mmc_card *card)
3015{
3016	mmc_test_free_result(card);
3017	mmc_test_free_dbgfs_file(card);
3018}
3019
3020static void mmc_test_shutdown(struct mmc_card *card)
3021{
3022}
3023
3024static struct mmc_driver mmc_driver = {
3025	.drv		= {
3026		.name	= "mmc_test",
3027	},
3028	.probe		= mmc_test_probe,
3029	.remove		= mmc_test_remove,
3030	.shutdown	= mmc_test_shutdown,
3031};
3032
3033static int __init mmc_test_init(void)
3034{
3035	return mmc_register_driver(&mmc_driver);
3036}
3037
3038static void __exit mmc_test_exit(void)
3039{
3040	/* Clear stalled data if card is still plugged */
3041	mmc_test_free_result(NULL);
3042	mmc_test_free_dbgfs_file(NULL);
3043
3044	mmc_unregister_driver(&mmc_driver);
3045}
3046
3047module_init(mmc_test_init);
3048module_exit(mmc_test_exit);
3049
3050MODULE_LICENSE("GPL");
3051MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3052MODULE_AUTHOR("Pierre Ossman");
3053