// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

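/*
 * Illustrative sizing note: on a system with 4 KiB pages (an assumption,
 * not something this file guarantees), BUFFER_SIZE is 4 KiB << 2 = 16 KiB,
 * i.e. 32 sectors of 512 bytes.
 */
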
#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
	struct scatterlist *sg_areq;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of sector groups checked
 * @sectors: number of sectors checked in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

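/*
 * Decide whether CMD23 (SET_BLOCK_COUNT) may be used with this card:
 * eMMC always supports it, while SD cards advertise support via the
 * command-support bits in their SCR register.
 */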
static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

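/*
 * Prepare the optional SBC (CMD23) part of the request: if the host, the
 * card or the command sequence cannot use CMD23, drop mrq->sbc; otherwise
 * program it with the block count for the following multi-block transfer.
 */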
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}

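/*
 * A card is busy after a transfer while its R1 status signals
 * not-ready-for-data or reports the programming state.
 */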
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

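/*
 * Free every page allocation tracked by @mem, then the tracking array and
 * the descriptor itself. Safe to call with a NULL @mem.
 */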
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist. Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous. Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

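/*
 * Worked example for the scaling above: transferring 4 MiB in 0.25 s gives
 * bytes = 4194304 * 10^9 and ns = 250000000. Since ns is below UINT_MAX the
 * halving loop never runs, and do_div() yields 16777216 bytes/s (16 MiB/s).
 */
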
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
		(u32)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
		"%llu.%09u seconds (%u kB/s, %u KiB/s, "
		"%u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(u64)ts.tv_sec, (u32)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

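/*
 * Issue @mrq without blocking while finishing @prev_mrq: the new request is
 * prepared with mmc_pre_req() before waiting for the previous one, so host
 * drivers can overlap DMA preparation with the transfer still in flight.
 */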
static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}

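/*
 * Ping-pong between two pre-allocated requests (and the two scatterlists)
 * so that one transfer is always being prepared while the other completes.
 */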
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      unsigned int dev_addr, int write,
				      int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
	struct mmc_test_area *t = &test->area;
	struct scatterlist *sg = t->sg;
	struct scatterlist *sg_areq = t->sg_areq;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		swap(sg, sg_areq);
		dev_addr += t->blocks;
	}

	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len, bool nonblock)
{
	struct mmc_test_area *t = &test->area;
	int err;
	unsigned int sg_len = 0;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}

	if (err || !nonblock)
		goto err;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
						  t->max_segs, t->max_seg_sz,
						  &sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
				      t->max_seg_sz, &sg_len, min_sg_len);
	}
	if (!err && sg_len != t->sg_len)
		err = -EINVAL;

err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	kfree(t->sg_areq);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization). Optionally, the area is erased
 * (if the card supports it) which may improve write performance. Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
				   GFP_KERNEL);
	if (!t->sg_areq) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance. Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list. This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

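/*
 * Time back-to-back reads of sz bytes across the whole test area and
 * report the average rate.
 */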
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

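/*
 * As above, but for writes: erase the test area first, then time
 * back-to-back writes of sz bytes across it.
 */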
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

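/*
 * Simple deterministic pseudo-random generator: the classic C-library LCG
 * (multiplier 1103515245, increment 12345). Determinism lets a write test
 * replay exactly the same address sequence twice; the scaling below maps
 * the 15-bit output into [0, rnd_cnt).
 */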
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}

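/*
 * Perform random-address transfers of sz bytes for roughly ten seconds,
 * picking a random erase group and a random offset within it each time.
 */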
mmc_test_rnd_perf(struct mmc_test_card * test,int write,int print,unsigned long sz)1926 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1927 unsigned long sz)
1928 {
1929 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1930 unsigned int ssz;
1931 struct timespec64 ts1, ts2, ts;
1932 int ret;
1933
1934 ssz = sz >> 9;
1935
1936 rnd_addr = mmc_test_capacity(test->card) / 4;
1937 range1 = rnd_addr / test->card->pref_erase;
1938 range2 = range1 / ssz;
1939
1940 ktime_get_ts64(&ts1);
1941 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1942 ktime_get_ts64(&ts2);
1943 ts = timespec64_sub(ts2, ts1);
1944 if (ts.tv_sec >= 10)
1945 break;
1946 ea = mmc_test_rnd_num(range1);
1947 if (ea == last_ea)
1948 ea -= 1;
1949 last_ea = ea;
1950 dev_addr = rnd_addr + test->card->pref_erase * ea +
1951 ssz * mmc_test_rnd_num(range2);
1952 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1953 if (ret)
1954 return ret;
1955 }
1956 if (print)
1957 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1958 return 0;
1959 }

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec64 ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */

	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	ktime_get_ts64(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
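
/*
 * Worked example of the max_scatter cap above: for a host that allows,
 * say, max_segs == 128 with max_seg_sz >= PAGE_SIZE (4 KiB assumed here),
 * a maximally scattered transfer is limited to 128 * 4 KiB = 512 KiB even
 * if t->max_tfr is larger, because each scatterlist segment is a single
 * page.  The numbers are illustrative only; the real limits come from the
 * host controller driver.
 */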

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}

static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up the test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to a 32MiB (0x10000-sector) boundary */
	else
		dev_addr &= 0xfffff800; /* Round to a 1MiB (0x800-sector) boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* Prepare the test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run the test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
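
/*
 * Note on the erase preparation above: MMC_SECURE_ERASE_ARG is tried
 * first and, if the card rejects it, the code falls back to a plain
 * MMC_ERASE_ARG erase; only if both fail does preparation abort.  The
 * erase length is given in 512-byte sectors (size / 512), matching the
 * sector-addressed dev_addr.
 */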

static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
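
/*
 * The two helpers above sweep different dimensions of the same test:
 * mmc_test_rw_multiple_size() varies the request size (rw->bs[]) with an
 * unconstrained scatterlist, while mmc_test_rw_multiple_sg_len() holds
 * the request size at 512 KiB and varies the minimum number of
 * scatterlist elements (rw->sg_len[]) instead.
 */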

/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * eMMC hardware reset.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset(host);
	if (!err) {
		/*
		 * Reset will re-enable the card's command queue, but tests
		 * expect it to be disabled.
		 */
		if (card->ext_csd.cmdq_en)
			mmc_cmdq_disable(card);
		return RESULT_OK;
	} else if (err == -EOPNOTSUPP) {
		return RESULT_UNSUP_HOST;
	}

	return RESULT_FAIL;
}

static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}
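
/*
 * mmc_test_send_status() issues CMD13 (SEND_STATUS).  Outside SPI mode the
 * card's relative card address (RCA) goes in bits 31:16 of the argument;
 * in SPI mode the argument stays zero and the R2 (SPI) response format
 * applies instead of R1.  The retry count passed to mmc_wait_for_cmd() is
 * 0, so a failure is reported immediately rather than retried.
 */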

static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		ret = mmc_test_start_areq(test, mrq, NULL);
		if (ret)
			goto out_free;
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq) {
		ret = mmc_test_start_areq(test, NULL, mrq);
	} else {
		mmc_wait_for_req_done(test->card->host, mrq);
	}

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret) {
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(test->card->host), status, cmd_ret);
	}

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(test->card->host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}
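
/*
 * Shape of the polling loop above, in outline:
 *
 *	start the data transfer (async or blocking);
 *	do {
 *		send CMD13;	// status while data is moving
 *		stop on command error, R1_ERROR, request completion,
 *		or the 3 second timeout;
 *	} while (repeat_cmd && card not back in the Tran state);
 *	wait for the data request to finish and check the result;
 *
 * With repeat_cmd == 0 exactly one in-flight status command is sent; with
 * repeat_cmd == 1 the loop keeps issuing CMD13 until the card returns to
 * the Transfer state, and the completion count is reported once the
 * transfer size reaches t->max_tfr.
 */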

static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}

static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}
	return 0;
}
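
/*
 * Unlike the performance sweeps, which double the transfer size at each
 * step, this loop walks every 512-byte multiple from one sector up to
 * t->max_tfr, and each size runs two transfers (one with a single status
 * command, one with repeated status commands).  With a large max_tfr this
 * is deliberately exhaustive and can take a while.
 */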

/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}

static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Proper xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Proper xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},
};
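
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug and a
 * card known as mmc0:0001 (the exact path depends on the host and card):
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist   # list test cases
 *	# echo 7 > /sys/kernel/debug/mmc0/mmc0:0001/test  # run case 7
 *	# echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test  # run all cases
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test       # read results
 *
 * Writing 0 runs every test, matching the "0: Run all tests" entry that
 * mtf_testlist_show() prints.
 */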

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append the container to the global list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save a pointer to the created container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
				   tr->count, tr->sectors,
				   (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
				   tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card, so that only
	 * the data of the last run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	if (test->highmem)
		__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
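
/*
 * Reading the "test" file goes through mtf_test_show(): each test case
 * produces a "Test <n>: <result>" line (results use the RESULT_* codes),
 * followed by one line per transfer measurement in the form
 *
 *	<count> <sectors> <seconds.nanoseconds> <rate> <iops>
 *
 * where iops is printed with two decimal places because it is stored
 * times 100.  Writing a test number to the same file runs that test.
 */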

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtf_testlist);

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}
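
/*
 * The returned dentry must be captured in 'file' (and then in df->file):
 * it is the handle that mmc_test_free_dbgfs_file() later passes to
 * debugfs_remove() when the card goes away or the module unloads.
 * Without that assignment the dentry would leak and the stale debugfs
 * entry would outlive the card.
 */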

static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
					     &mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
					     &mtf_testlist_fops);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear any stale results in case a card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");