// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/mmc/emmc_partitions.h>
#include <linux/amlogic/aml_sd.h>
#include "emmc_key.h"
#include "../../../block/blk.h"
#include "../../drivers/mmc/core/mmc_ops.h"
#include "../../drivers/mmc/core/core.h"

#define DTB_NAME		"dtb"
#define SZ_1M			0x00100000
#define MMC_DTB_PART_OFFSET	(40 * SZ_1M)
#define EMMC_BLOCK_SIZE		(0x100)
#define MAX_EMMC_BLOCK_SIZE	(128 * 1024)

#define DTB_RESERVE_OFFSET	(4 * SZ_1M)
#define DTB_BLK_SIZE		(0x200)
#define DTB_BLK_CNT		(512)
#define DTB_SIZE		(DTB_BLK_CNT * DTB_BLK_SIZE)
#define DTB_COPIES		(2)
#define DTB_AREA_BLK_CNT	(DTB_BLK_CNT * DTB_COPIES)
/* per-transfer block count for internal operations. */
#define MAX_TRANS_BLK		(256)
#define MAX_TRANS_SIZE		(MAX_TRANS_BLK * DTB_BLK_SIZE)
#define stamp_after(a, b)	((int)(b) - (int)(a) < 0)

#define GPT_HEADER_SIGNATURE	0x5452415020494645ULL

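/*
 * One DTB copy as stored in the reserved area: the payload fills the
 * DTB_BLK_CNT * DTB_BLK_SIZE slot except for a 16-byte trailer holding
 * magic, version, timestamp and checksum.
 */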
struct aml_dtb_rsv {
	u8 data[DTB_BLK_SIZE * DTB_BLK_CNT - 4L * sizeof(unsigned int)];
	unsigned int magic;
	unsigned int version;
	unsigned int timestamp;
	unsigned int checksum;
};

struct aml_dtb_info {
	unsigned int stamp[2];
	u8 valid[2];
};

struct efi_guid_t {
	u8 b[16];
};

struct gpt_header {
	__le64 signature;
	__le32 revision;
	__le32 header_size;
	__le32 header_crc32;
	__le32 reserved1;
	__le64 my_lba;
	__le64 alternate_lba;
	__le64 first_usable_lba;
	__le64 last_usable_lba;
	struct efi_guid_t disk_guid;
	__le64 partition_entry_lba;
	__le32 num_partition_entries;
	__le32 sizeof_partition_entry;
	__le32 partition_entry_array_crc32;
};

static dev_t amlmmc_dtb_no;
struct cdev amlmmc_dtb;
struct device *dtb_dev;
struct class *amlmmc_dtb_class;
static char *glb_dtb_buf;
struct mmc_card *card_dtb;
static struct aml_dtb_info dtb_infos = {{0, 0}, {0, 0}};
struct mmc_partitions_fmt *pt_fmt;

static int mmc_transfer(struct mmc_card *card, unsigned int dev_addr,
			unsigned int blocks, void *buf, int write);

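/* Raw block read/write helpers on top of mmc_transfer(). */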
int mmc_read_internal(struct mmc_card *card, unsigned int dev_addr,
		      unsigned int blocks, void *buf)
{
	return mmc_transfer(card, dev_addr, blocks, buf, 0);
}

int mmc_write_internal(struct mmc_card *card, unsigned int dev_addr,
		       unsigned int blocks, void *buf)
{
	return mmc_transfer(card, dev_addr, blocks, buf, 1);
}

/* dtb read&write operation with backup updates */
static unsigned int _calc_dtb_checksum(struct aml_dtb_rsv *dtb)
{
	int i = 0;
	int size = sizeof(struct aml_dtb_rsv) - sizeof(unsigned int);
	unsigned int *buffer;
	unsigned int checksum = 0;

	size = size >> 2L;
	buffer = (unsigned int *)dtb;
	while (i < size) {
		checksum += buffer[i++];
	}

	return checksum;
}

static int _verify_dtb_checksum(struct aml_dtb_rsv *dtb)
{
	unsigned int checksum;

	checksum = _calc_dtb_checksum(dtb);
	pr_info("calc %x, store %x\n", checksum, dtb->checksum);

	return !(checksum == dtb->checksum);
}

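/*
 * Write one CONFIG_DTB_SIZE image at the given block address,
 * MAX_TRANS_BLK blocks per transfer, with the host claimed for the
 * whole operation.
 */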
static int _dtb_write(struct mmc_card *mmc, int blk, unsigned char *buf)
{
	int ret = 0;
	unsigned char *src = NULL;
	int bit = mmc->csd.read_blkbits;
	int cnt = CONFIG_DTB_SIZE >> bit;
	int blk_tmp = blk;

	src = (unsigned char *)buf;

	mmc_claim_host(mmc->host);
	do {
		ret = mmc_write_internal(mmc, blk_tmp, MAX_TRANS_BLK, src);
		if (ret) {
			pr_err("%s: save dtb error", __func__);
			ret = -EFAULT;
			break;
		}
		blk_tmp += MAX_TRANS_BLK;
		cnt -= MAX_TRANS_BLK;
		src += MAX_TRANS_SIZE;
	} while (cnt != 0);
	mmc_release_host(mmc->host);

	return ret;
}

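/* Read one CONFIG_DTB_SIZE image from the given block address, in MAX_TRANS_BLK chunks. */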
static int _dtb_read(struct mmc_card *mmc, int blk, unsigned char *buf)
{
	int ret = 0;
	unsigned char *dst = NULL;
	int bit = mmc->csd.read_blkbits;
	int cnt = CONFIG_DTB_SIZE >> bit;
	int blk_tmp = blk;

	dst = (unsigned char *)buf;
	mmc_claim_host(mmc->host);
	do {
		ret = mmc_read_internal(mmc, blk_tmp, MAX_TRANS_BLK, dst);
		if (ret) {
			pr_err("%s: read dtb error", __func__);
			ret = -EFAULT;
			break;
		}
		blk_tmp += MAX_TRANS_BLK;
		cnt -= MAX_TRANS_BLK;
		dst += MAX_TRANS_SIZE;
	} while (cnt != 0);
	mmc_release_host(mmc->host);
	return ret;
}

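/*
 * Probe both DTB copies in the reserved area and record their timestamps
 * and whether their checksum verifies, so later writes can pick the newer
 * stamp.
 */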
static int _dtb_init(struct mmc_card *mmc)
{
	int ret = 0;
	struct aml_dtb_rsv *dtb;
	struct aml_dtb_info *info = &dtb_infos;
	int cpy = 1, valid = 0;
	int bit = mmc->csd.read_blkbits;
	int blk;

	if (!glb_dtb_buf) {
		glb_dtb_buf = kmalloc(CONFIG_DTB_SIZE, GFP_KERNEL);
		if (!glb_dtb_buf) {
			return -ENOMEM;
		}
	}
	dtb = (struct aml_dtb_rsv *)glb_dtb_buf;

	/* read the second copy first, for compatibility without checksum. */
	while (cpy >= 0) {
		blk = ((get_reserve_partition_off_from_tbl()
			+ DTB_RESERVE_OFFSET) >> bit) + cpy * DTB_BLK_CNT;
		if (_dtb_read(mmc, blk, (unsigned char *)dtb)) {
			pr_err("%s: block # %#x ERROR!\n", __func__, blk);
		} else {
			ret = _verify_dtb_checksum(dtb);
			if (!ret) {
				info->stamp[cpy] = dtb->timestamp;
				info->valid[cpy] = 1;
			} else {
				pr_err("cpy %d is not valid\n", cpy);
			}
		}
		valid += info->valid[cpy];
		cpy--;
	}
	pr_info("total valid %d\n", valid);

	return ret;
}

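/*
 * Write the DTB image to both copies in the reserved area. The new
 * timestamp is one past the newest valid copy (or 0 if none is valid),
 * and magic, version and checksum are filled in before writing.
 */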
int amlmmc_dtb_write(struct mmc_card *mmc, unsigned char *buf, int len)
{
	int ret = 0, blk;
	int bit = mmc->csd.read_blkbits;
	int cpy, valid;
	struct aml_dtb_rsv *dtb = (struct aml_dtb_rsv *)buf;
	struct aml_dtb_info *info = &dtb_infos;

	if (len > CONFIG_DTB_SIZE) {
		pr_err("%s: dtb data length too large", __func__);
		return -EFAULT;
	}
	/* set info */
	valid = info->valid[0] + info->valid[1];
	if (valid == 0) {
		dtb->timestamp = 0;
	} else if (valid == 1) {
		dtb->timestamp = 1 + info->stamp[info->valid[0] ? 0 : 1];
	} else {
		/* both copies are valid */
		if (info->stamp[0] != info->stamp[1]) {
			pr_info("timestamps differ %d:%d\n",
				info->stamp[0], info->stamp[1]);
			dtb->timestamp = 1 + (stamp_after(info->stamp[1], info->stamp[0]) ?
					      info->stamp[1] : info->stamp[0]);
		} else {
			dtb->timestamp = 1 + info->stamp[0];
		}
	}
	/* set version and magic */
	dtb->version = 1; /* base version */
	dtb->magic = 0x00447e41; /* A~D\0 */
	dtb->checksum = _calc_dtb_checksum(dtb);
	pr_info("stamp %d, checksum 0x%x, version %d, magic %s\n",
		dtb->timestamp, dtb->checksum, dtb->version, (char *)&dtb->magic);
	/* write down... */
	for (cpy = 0; cpy < DTB_COPIES; cpy++) {
		blk = ((get_reserve_partition_off_from_tbl()
			+ DTB_RESERVE_OFFSET) >> bit) + cpy * DTB_BLK_CNT;
		ret |= _dtb_write(mmc, blk, buf);
	}

	return ret;
}

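/*
 * Read CONFIG_DTB_SIZE bytes from the fixed MMC_DTB_PART_OFFSET area into
 * the caller's buffer, one DTB_CELL_SIZE chunk at a time.
 */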
int amlmmc_dtb_read(struct mmc_card *card, unsigned char *buf, int len)
{
	int ret = 0, start_blk, size, blk_cnt;
	int bit = card->csd.read_blkbits;
	unsigned char *dst = NULL;
	unsigned char *buffer = NULL;

	if (len > CONFIG_DTB_SIZE) {
		pr_err("%s: dtb data length too large", __func__);
		return -EFAULT;
	}
	memset(buf, 0x0, len);

	start_blk = MMC_DTB_PART_OFFSET;
	buffer = kmalloc(DTB_CELL_SIZE, GFP_KERNEL | __GFP_RECLAIM);
	if (!buffer) {
		return -ENOMEM;
	}

	start_blk >>= bit;
	size = CONFIG_DTB_SIZE;
	blk_cnt = size >> bit;
	dst = (unsigned char *)buffer;
	while (blk_cnt != 0) {
		memset(buffer, 0x0, DTB_CELL_SIZE);
		ret = mmc_read_internal(card, start_blk, (DTB_CELL_SIZE >> bit), dst);
		if (ret) {
			pr_err("%s: read dtb error", __func__);
			ret = -EFAULT;
			kfree(buffer);
			return ret;
		}
		start_blk += (DTB_CELL_SIZE >> bit);
		blk_cnt -= (DTB_CELL_SIZE >> bit);
		memcpy(buf, dst, DTB_CELL_SIZE);
		buf += DTB_CELL_SIZE;
	}
	kfree(buffer);
	return ret;
}

static CLASS_ATTR_STRING(emmcdtb, 0644, NULL);

int mmc_dtb_open(struct inode *node, struct file *file)
{
	return 0;
}

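/*
 * read() handler of the dtb character device: re-read the DTB area into
 * glb_dtb_buf and copy the requested window to user space.
 */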
ssize_t mmc_dtb_read(struct file *file, char __user *buf,
		     size_t count, loff_t *ppos)
{
	unsigned char *dtb_ptr = NULL;
	ssize_t read_size = 0;
	int ret = 0;

	if (*ppos == CONFIG_DTB_SIZE) {
		return 0;
	}

	if (*ppos >= CONFIG_DTB_SIZE) {
		pr_err("%s: out of space!", __func__);
		return -EFAULT;
	}

	dtb_ptr = glb_dtb_buf;
	if (!dtb_ptr) {
		return -ENOMEM;
	}

	mmc_claim_host(card_dtb->host);
	ret = amlmmc_dtb_read(card_dtb, (unsigned char *)dtb_ptr, CONFIG_DTB_SIZE);
	if (ret) {
		pr_err("%s: read failed:%d", __func__, ret);
		read_size = -EFAULT;
		goto exit;
	}
	if ((*ppos + count) > CONFIG_DTB_SIZE) {
		read_size = CONFIG_DTB_SIZE - *ppos;
	} else {
		read_size = count;
	}
	if (copy_to_user(buf, (dtb_ptr + *ppos), read_size)) {
		read_size = -EFAULT;
		goto exit;
	}
	*ppos += read_size;

exit:
	mmc_release_host(card_dtb->host);
	return read_size;
}

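/*
 * write() handler of the dtb character device: copy user data into
 * glb_dtb_buf at *ppos and flush the whole buffer back to flash.
 */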
ssize_t mmc_dtb_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *ppos)
{
	unsigned char *dtb_ptr = NULL;
	ssize_t write_size = 0;
	int ret = 0;

	if (*ppos == CONFIG_DTB_SIZE) {
		return 0;
	}
	if (*ppos >= CONFIG_DTB_SIZE) {
		pr_err("%s: out of space!", __func__);
		return -EFAULT;
	}

	dtb_ptr = glb_dtb_buf;
	if (!dtb_ptr) {
		return -ENOMEM;
	}

	mmc_claim_host(card_dtb->host);

	if ((*ppos + count) > CONFIG_DTB_SIZE) {
		write_size = CONFIG_DTB_SIZE - *ppos;
	} else {
		write_size = count;
	}

	if (copy_from_user((dtb_ptr + *ppos), buf, write_size)) {
		write_size = -EFAULT;
		goto exit;
	}

	ret = amlmmc_dtb_write(card_dtb, dtb_ptr, CONFIG_DTB_SIZE);
	if (ret) {
		pr_err("%s: write dtb failed", __func__);
		write_size = -EFAULT;
		goto exit;
	}

	*ppos += write_size;
exit:
	mmc_release_host(card_dtb->host);
	return write_size;
}

long mmc_dtb_ioctl(struct file *file, unsigned int cmd, unsigned long args)
{
	return 0;
}

static const struct file_operations dtb_ops = {
	.open = mmc_dtb_open,
	.read = mmc_dtb_read,
	.write = mmc_dtb_write,
	.unlocked_ioctl = mmc_dtb_ioctl,
};

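/*
 * Probe the on-flash DTB copies, then register the "dtb" character device,
 * its class and device node so user space can read and update the device
 * tree stored on eMMC.
 */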
int amlmmc_dtb_init(struct mmc_card *card)
{
	int ret = 0;

	card_dtb = card;
	pr_info("%s: register dtb chardev", __func__);

	_dtb_init(card);

	ret = alloc_chrdev_region(&amlmmc_dtb_no, 0, 1, DTB_NAME);
	if (ret < 0) {
		pr_err("alloc dtb dev_t no failed");
		ret = -1;
		goto exit_err;
	}

	cdev_init(&amlmmc_dtb, &dtb_ops);
	amlmmc_dtb.owner = THIS_MODULE;
	ret = cdev_add(&amlmmc_dtb, amlmmc_dtb_no, 1);
	if (ret) {
		pr_err("dtb cdev add failed");
		ret = -1;
		goto exit_err1;
	}

	amlmmc_dtb_class = class_create(THIS_MODULE, DTB_NAME);
	if (IS_ERR(amlmmc_dtb_class)) {
		pr_err("dtb class create failed");
		ret = -1;
		goto exit_err2;
	}

	ret = class_create_file(amlmmc_dtb_class, &class_attr_emmcdtb.attr);
	if (ret) {
		pr_err("dtb class file create failed");
		ret = -1;
		goto exit_err2;
	}

	dtb_dev = device_create(amlmmc_dtb_class, NULL, amlmmc_dtb_no, NULL, DTB_NAME);
	if (IS_ERR(dtb_dev)) {
		pr_err("dtb device create failed");
		ret = -1;
		goto exit_err3;
	}

	pr_info("%s: register dtb chardev OK", __func__);

	return ret;

exit_err3:
	class_remove_file(amlmmc_dtb_class, &class_attr_emmcdtb.attr);
	class_destroy(amlmmc_dtb_class);
exit_err2:
	cdev_del(&amlmmc_dtb);
exit_err1:
	unregister_chrdev_region(amlmmc_dtb_no, 1);
exit_err:
	return ret;
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_check_result(struct mmc_request *mrq)
{
	int ret;

	WARN_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error) {
		ret = mrq->cmd->error;
	}
	if (!ret && mrq->data->error) {
		ret = mrq->data->error;
	}
	if (!ret && mrq->stop && mrq->stop->error) {
		ret = mrq->stop->error;
	}
	if (!ret && mrq->data->bytes_xfered != mrq->data->blocks * mrq->data->blksz) {
		ret = RESULT_FAIL;
	}

	if (ret == -EINVAL) {
		ret = RESULT_UNSUP_HOST;
	}

	return ret;
}

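/*
 * Fill in a block read/write request: single vs. multiple block opcode,
 * byte vs. block addressing, optional stop command and data timeout.
 */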
static void mmc_prepare_mrq(struct mmc_card *card, struct mmc_request *mrq,
			    struct scatterlist *sg, unsigned int sg_len,
			    unsigned int dev_addr, unsigned int blocks,
			    unsigned int blksz, int write)
{
	WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_is_blockaddr(card)) {
		mrq->cmd->arg <<= 9L;
	}

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1) {
		mrq->stop = NULL;
	} else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, card);
}

unsigned int mmc_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_is_blockaddr(card)) {
		return card->ext_csd.sectors;
	} else {
		return card->csd.capacity << (card->csd.read_blkbits - 9L);
	}
}

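/*
 * Synchronous block transfer on the user area. If another hardware
 * partition is currently selected, switch to the user area first and
 * switch back once the request completes.
 */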
static int mmc_transfer(struct mmc_card *card, unsigned int dev_addr,
			unsigned int blocks, void *buf, int write)
{
	u8 original_part_config = 0;
	u8 user_part_number = 0;
	u8 cur_part_number;
	bool switch_partition = false;
	unsigned int size;
	struct scatterlist sg;
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};
	int ret;

	cur_part_number = card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK;
	if (cur_part_number != user_part_number) {
		switch_partition = true;
		original_part_config = card->ext_csd.part_config;
		/* clear the access bits to select the user area */
		cur_part_number = original_part_config & (~EXT_CSD_PART_CONFIG_ACC_MASK);
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 cur_part_number, card->ext_csd.part_time);
		if (ret) {
			return ret;
		}

		card->ext_csd.part_config = cur_part_number;
	}
	if ((dev_addr + blocks) >= mmc_capacity(card)) {
		pr_info("[%s] %s range exceeds device capacity!\n",
			__func__, write ? "write" : "read");
		ret = -1;
		return ret;
	}

	size = blocks << card->csd.read_blkbits;
	sg_init_one(&sg, buf, size);

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_prepare_mrq(card, &mrq, &sg, 1, dev_addr, blocks,
			1 << card->csd.read_blkbits, write);

	mmc_wait_for_req(card->host, &mrq);

	ret = mmc_check_result(&mrq);
	if (switch_partition) {
		int err;

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 original_part_config, card->ext_csd.part_time);
		if (err) {
			return err;
		}
		card->ext_csd.part_config = original_part_config;
	}

	return ret;
}

/* write tuning parameters on emmc, the offset is 0x14400 */
static int amlmmc_write_tuning_para(struct mmc_card *card, unsigned int dev_addr)
{
	unsigned int size;
	struct mmc_host *mmc = card->host;
	struct meson_host *host = mmc_priv(mmc);
	struct aml_tuning_para *parameter = &host->para;
	unsigned int *buf;
	int para_size;
	int blocks;

	if (host->save_para == 0) {
		return 0;
	}

	if (parameter->update == 0) {
		return 0;
	}
	parameter->update = 0;

	para_size = sizeof(struct aml_tuning_para);
	blocks = (para_size - 1) / 512L + 1;
	size = blocks << card->csd.read_blkbits;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf) {
		return -ENOMEM;
	}

	memcpy(buf, parameter, sizeof(struct aml_tuning_para));

	mmc_claim_host(card->host);
	mmc_transfer(card, dev_addr, blocks, buf, 1);
	mmc_release_host(card->host);

	kfree(buf);
	return 0;
}

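/* Checksum used to validate the partition table read from flash. */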
static int mmc_partition_tbl_checksum_calc(struct partitions *part, int part_num)
{
	int i, j;
	u32 checksum = 0, *p;

	for (i = 0; i < part_num; i++) {
		p = (u32 *)part;

		for (j = sizeof(struct partitions) / sizeof(checksum); j > 0; j--) {
			checksum += *p;
			p++;
		}
	}

	return checksum;
}

static int get_reserve_partition_off(struct mmc_card *card) /* byte unit */
{
	int off = -1;

	off = MMC_BOOT_PARTITION_SIZE + MMC_BOOT_PARTITION_RESERVED;

	return off;
}

int get_reserve_partition_off_from_tbl(void)
{
	int i;

	for (i = 0; i < pt_fmt->part_num; i++) {
		if (!strcmp(pt_fmt->partitions[i].name, MMC_RESERVED_NAME)) {
			return pt_fmt->partitions[i].offset;
		}
	}
	return -1;
}

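/*
 * Read the Amlogic partition table from the reserved area and validate
 * its magic, partition count and checksum.
 */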
static int mmc_read_partition_tbl(struct mmc_card *card,
				  struct mmc_partitions_fmt *read_pt_fmt)
{
	int ret = 0, start_blk, size, blk_cnt;
	int bit = card->csd.read_blkbits;
	int blk_size = 1 << bit; /* size of a block */
	char *buf, *dst;

	buf = kmalloc(blk_size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto exit_err;
	}
	memset(read_pt_fmt, 0, sizeof(struct mmc_partitions_fmt));
	memset(buf, 0, blk_size);
	start_blk = get_reserve_partition_off(card);
	if (start_blk < 0) {
		ret = -EINVAL;
		goto exit_err;
	}
	start_blk >>= bit;
	size = sizeof(struct mmc_partitions_fmt);
	dst = (char *)read_pt_fmt;
	if (size >= blk_size) {
		blk_cnt = size >> bit;
		ret = mmc_read_internal(card, start_blk, blk_cnt, dst);
		if (ret) { /* error */
			goto exit_err;
		}
		start_blk += blk_cnt;
		dst += blk_cnt << bit;
		size -= blk_cnt << bit;
	}
	if (size > 0) { /* the last block */
		ret = mmc_read_internal(card, start_blk, 1, buf);
		if (ret) {
			goto exit_err;
		}
		memcpy(dst, buf, size);
	}

	if ((strncmp(read_pt_fmt->magic, MMC_PARTITIONS_MAGIC,
		     sizeof(read_pt_fmt->magic)) == 0) &&
	    read_pt_fmt->part_num > 0 &&
	    read_pt_fmt->part_num <= MAX_MMC_PART_NUM &&
	    read_pt_fmt->checksum == mmc_partition_tbl_checksum_calc(read_pt_fmt->partitions,
								     read_pt_fmt->part_num)) {
		ret = 0; /* everything is OK now */
	} else {
		if (strncmp(read_pt_fmt->magic, MMC_PARTITIONS_MAGIC,
			    sizeof(read_pt_fmt->magic)) != 0) {
			pr_info("magic error: %s\n", read_pt_fmt->magic);
		} else if ((read_pt_fmt->part_num < 0) ||
			   (read_pt_fmt->part_num > MAX_MMC_PART_NUM)) {
			pr_info("partition number error: %d\n", read_pt_fmt->part_num);
		} else {
			pr_info("checksum error: pt_fmt->checksum=%d, calc_result=%d\n",
				read_pt_fmt->checksum,
				mmc_partition_tbl_checksum_calc(read_pt_fmt->partitions,
								read_pt_fmt->part_num));
		}

		pr_info("[%s]: partition table verification failed\n", __func__);
		ret = -1; /* the partition information is invalid */
	}

exit_err:
	kfree(buf);

	pr_info("[%s] mmc read partition %s!\n", __func__, (ret == 0) ? "OK" : "ERROR");

	return ret;
}

/* This function is copied and modified from the kernel function add_partition() */
static struct hd_struct *add_emmc_each_part(struct gendisk *disk, int partno,
					    sector_t start, sector_t len,
					    int flags, char *pname)
{
	struct hd_struct *p;
	dev_t devt = MKDEV(0, 0);
	struct device *ddev = disk_to_dev(disk);
	struct device *pdev;
	struct disk_part_tbl *ptbl;
	const char *dname;
	int err;
	struct partition_meta_info *pinfo;

	/*
	 * Partitions are not supported on zoned block devices that are used as
	 * such.
	 */
	switch (disk->queue->limits.zoned) {
	case BLK_ZONED_HM:
		pr_warn("%s: partitions not supported on host managed zoned block device\n",
			disk->disk_name);
		return ERR_PTR(-ENXIO);
	case BLK_ZONED_HA:
		pr_info("%s: disabling host aware zoned block device support due to partitions\n",
			disk->disk_name);
		disk->queue->limits.zoned = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
		break;
	}

	err = disk_expand_part_tbl(disk, partno);
	if (err) {
		return ERR_PTR(err);
	}
	ptbl = rcu_dereference_protected(disk->part_tbl, 1);
	if (ptbl->part[partno]) {
		return ERR_PTR(-EBUSY);
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		return ERR_PTR(-ENOMEM);
	}

	p->dkstats = alloc_percpu(struct disk_stats);
	if (!p->dkstats) {
		err = -ENOMEM;
		goto out_free;
	}
	hd_sects_seq_init(p);
	pdev = part_to_dev(p);

	p->start_sect = start;
	p->nr_sects = len;
	p->partno = partno;
	p->policy = get_disk_ro(disk);

	pinfo = kzalloc_node(sizeof(*pinfo), GFP_KERNEL, disk->node_id);
	if (!pinfo) {
		err = -ENOMEM;
		goto out_free_stats;
	}
	p->info = pinfo;
	snprintf(p->info->volname, sizeof(p->info->volname), "%s", pname);

	dname = dev_name(ddev);
	dev_set_name(pdev, "%s", pname);

	device_initialize(pdev);
	pdev->class = &block_class;
	pdev->type = &part_type;
	pdev->parent = ddev;

	err = blk_alloc_devt(p, &devt);
	if (err) {
		goto out_free_info;
	}
	pdev->devt = devt;

	/* delay uevent until 'holders' subdir is created */
	dev_set_uevent_suppress(pdev, 1);
	err = device_add(pdev);
	if (err) {
		goto out_put;
	}

	err = -ENOMEM;
	p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
	if (!p->holder_dir) {
		goto out_del;
	}

	dev_set_uevent_suppress(pdev, 0);

	/* everything is up and running, commence */
	rcu_assign_pointer(ptbl->part[partno], p);

	/* suppress uevent if the disk suppresses it */
	if (!dev_get_uevent_suppress(ddev)) {
		kobject_uevent(&pdev->kobj, KOBJ_ADD);
	}

	hd_ref_init(p);
	return p;

out_free_info:
	kfree(p->info);
out_free_stats:
	free_percpu(p->dkstats);
out_free:
	kfree(p);
	return ERR_PTR(err);
out_del:
	kobject_put(p->holder_dir);
	device_del(pdev);
out_put:
	put_device(pdev);
	blk_free_devt(devt);
	return ERR_PTR(err);
}

static inline int card_proc_info(struct seq_file *m, char *dev_name, int i)
{
	struct partitions *this = &pt_fmt->partitions[i];

	if (i >= pt_fmt->part_num) {
		return 0;
	}

	seq_printf(m, "%s%02d: %9llx %9x \"%s\"\n", dev_name, i + 1,
		   (unsigned long long)this->size, 512 * 1024, this->name);
	return 0;
}

static int card_proc_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "dev: size erasesize name\n");
	for (i = 0; i < 16L; i++) {
		card_proc_info(m, "inand", i);
	}

	return 0;
}

static int card_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, card_proc_show, NULL);
}

static const struct proc_ops card_proc_fops = {
	.proc_open = card_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};

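/*
 * Register a block device partition for every entry of the Amlogic table
 * that fits within the disk capacity, and create the /proc/inand and
 * /proc/ntd listings.
 */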
static int add_emmc_partition(struct gendisk *disk, struct mmc_partitions_fmt *add_pt_fmt)
{
	unsigned int i;
	struct hd_struct *ret = NULL;
	uint64_t offset, size, cap;
	struct partitions *pp;
	struct proc_dir_entry *proc_card;

	pr_info("%s\n", __func__);

	cap = get_capacity(disk); /* unit:512 bytes */
	for (i = 0; i < add_pt_fmt->part_num; i++) {
		pp = &add_pt_fmt->partitions[i];
		offset = pp->offset >> 9L; /* unit:512 bytes */
		size = pp->size >> 9L; /* unit:512 bytes */
		if ((offset + size) <= cap) {
			ret = add_emmc_each_part(disk, 1 + i, offset, size, 0, pp->name);

			pr_info("[%sp%02d] %20s offset 0x%012llx, size 0x%012llx %s\n",
				disk->disk_name, 1 + i, pp->name,
				offset << 9L, size << 9L, IS_ERR(ret) ? "add fail" : "");
		} else {
			pr_info("[%s] %s: partition exceeds device capacity:\n",
				__func__, disk->disk_name);

			pr_info("%20s offset 0x%012llx, size 0x%012llx\n",
				pp->name, offset << 9L, size << 9L);

			break;
		}
	}

	/* create /proc/inand */
	proc_card = proc_create("inand", 0444L, NULL, &card_proc_fops);
	if (!proc_card) {
		pr_info("[%s] create /proc/inand fail.\n", __func__);
	}

	/* create /proc/ntd */
	if (!proc_create("ntd", 0444L, NULL, &card_proc_fops)) {
		pr_info("[%s] create /proc/ntd fail.\n", __func__);
	}

	return 0;
}

static int is_card_emmc(struct mmc_card *card)
{
	struct mmc_host *mmc = card->host;
	struct meson_host *host = mmc_priv(mmc);

	/* emmc port, so it must be an eMMC or TSD */
	if (aml_card_type_mmc(host)) {
		return 0;
	} else {
		return 1;
	}
}

static ssize_t emmc_version_get(struct class *class,
				struct class_attribute *attr, char *buf)
{
	int num = 0;

	return sprintf(buf, "%d", num);
}

static void show_partition_table(struct partitions *table)
{
	int i = 0;
	struct partitions *par_table = NULL;

	pr_info("show partition table:\n");
	for (i = 0; i < MAX_MMC_PART_NUM; i++) {
		par_table = &table[i];
		if (par_table->size == -1) {
			pr_info("part: %d, name : %10s, size : %-4s mask_flag %d\n",
				i, par_table->name, "end", par_table->mask_flags);
		} else {
			pr_info("part: %d, name : %10s, size : %-4llx mask_flag %d\n",
				i, par_table->name, par_table->size,
				par_table->mask_flags);
		}
	}
}

static ssize_t emmc_part_table_get(struct class *class,
				   struct class_attribute *attr, char *buf)
{
	struct partitions *part_table = NULL;
	struct partitions *tmp_table = NULL;
	int i = 0, part_num = 0;

	tmp_table = pt_fmt->partitions;
	part_table = kmalloc_array(MAX_MMC_PART_NUM, sizeof(struct partitions),
				   GFP_KERNEL);
	if (!part_table) {
		pr_info("[%s] malloc failed for part_table!\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < MAX_MMC_PART_NUM; i++) {
		if (tmp_table[i].mask_flags == STORE_CODE) {
			strncpy(part_table[part_num].name, tmp_table[i].name,
				MAX_MMC_PART_NAME_LEN);
			part_table[part_num].size = tmp_table[i].size;
			part_table[part_num].offset = tmp_table[i].offset;
			part_table[part_num].mask_flags = tmp_table[i].mask_flags;
			part_num++;
		}
	}
	for (i = 0; i < MAX_MMC_PART_NUM; i++) {
		if (tmp_table[i].mask_flags == STORE_CACHE) {
			strncpy(part_table[part_num].name, tmp_table[i].name,
				MAX_MMC_PART_NAME_LEN);
			part_table[part_num].size = tmp_table[i].size;
			part_table[part_num].offset = tmp_table[i].offset;
			part_table[part_num].mask_flags = tmp_table[i].mask_flags;
			part_num++;
		}
	}
	for (i = 0; i < MAX_MMC_PART_NUM; i++) {
		if (tmp_table[i].mask_flags == STORE_DATA) {
			strncpy(part_table[part_num].name, tmp_table[i].name,
				MAX_MMC_PART_NAME_LEN);
			part_table[part_num].size = tmp_table[i].size;
			part_table[part_num].offset = tmp_table[i].offset;
			part_table[part_num].mask_flags = tmp_table[i].mask_flags;

			if (!strncmp(part_table[part_num].name, "data",
				     MAX_MMC_PART_NAME_LEN)) {
				/* last part size is FULL */
				part_table[part_num].size = -1;
			}
			part_num++;
		}
	}

	show_partition_table(part_table);
	memcpy(buf, part_table, MAX_MMC_PART_NUM * sizeof(struct partitions));

	kfree(part_table);
	part_table = NULL;

	return MAX_MMC_PART_NUM * sizeof(struct partitions);
}

static int store_device = -1;
static ssize_t store_device_flag_get(struct class *class,
				     struct class_attribute *attr, char *buf)
{
	if (store_device == -1) {
		pr_info("[%s] failed to get store device flag!\n", __func__);
	}

	return sprintf(buf, "%d", store_device);
}

static ssize_t get_bootloader_offset(struct class *class,
				     struct class_attribute *attr, char *buf)
{
	int offset = 0;

	offset = 512L;
	return sprintf(buf, "%d", offset);
}

static struct class_attribute aml_version = __ATTR(version, 0444, emmc_version_get, NULL);
static struct class_attribute aml_part_table = __ATTR(part_table, 0444, emmc_part_table_get, NULL);
static struct class_attribute aml_store_device = __ATTR(store_device, 0444, store_device_flag_get, NULL);
static struct class_attribute bootloader_offset = __ATTR(bl_off_bytes, 0444, get_bootloader_offset, NULL);

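/*
 * Register a single partition that spans the device from sector 1 to the
 * end, named via the 'name' format string with the index toggled (idx ^ 1).
 */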
int add_fake_boot_partition(struct gendisk *disk, char *name, int idx)
{
	u64 boot_size = (u64)get_capacity(disk) - 1;
	char fake_name[80];
	int offset = 1;
	struct hd_struct *ret = NULL;
	int idx_tmp = idx;

	idx_tmp ^= 1;
	snprintf(fake_name, sizeof(fake_name), name, idx_tmp);
	ret = add_emmc_each_part(disk, 1, offset, boot_size, 0, fake_name);
	if (IS_ERR(ret)) {
		pr_info("%s add failed\n", fake_name);
	}

	return 0;
}

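/*
 * Main entry: on an eMMC card without a GPT, drop any MBR partitions,
 * read the Amlogic partition table, register its partitions, then set up
 * the key/dtb devices and the aml_store sysfs class.
 */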
int aml_emmc_partition_ops(struct mmc_card *card, struct gendisk *disk)
{
	int ret = 0;
	struct disk_part_iter piter;
	struct hd_struct *part;
	struct class *aml_store_class = NULL;
	struct gpt_header *gpt_h = NULL;
	unsigned char *buffer = NULL;

	pr_info("Enter %s\n", __func__);
	if (!is_card_emmc(card)) { /* not emmc, nothing to do */
		return 0;
	}

	buffer = kmalloc(512L, GFP_KERNEL);
	if (!buffer) {
		return -ENOMEM;
	}

	mmc_claim_host(card->host);

	/* self adapting: check for a GPT first */
	ret = mmc_read_internal(card, 1, 1, buffer);
	if (ret) {
		pr_err("%s: read gpt header error", __func__);
		kfree(buffer);
		mmc_release_host(card->host);
		goto out;
	}

	gpt_h = (struct gpt_header *)buffer;

	if (le64_to_cpu(gpt_h->signature) == GPT_HEADER_SIGNATURE) {
		kfree(buffer);
		mmc_release_host(card->host);
		return 0;
	}

	kfree(buffer);

	pt_fmt = kmalloc(sizeof(*pt_fmt), GFP_KERNEL);
	if (!pt_fmt) {
		mmc_release_host(card->host);
		return -ENOMEM;
	}

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter))) {
		pr_info("Delete invalid mbr partition part %p, part->partno %d\n",
			part, part->partno);
		delete_partition(part);
	}
	disk_part_iter_exit(&piter);

	ret = mmc_read_partition_tbl(card, pt_fmt);
	if (ret == 0) { /* ok */
		ret = add_emmc_partition(disk, pt_fmt);
	}
	mmc_release_host(card->host);

	if (ret == 0) { /* ok */
		ret = emmc_key_init(card);
	}
	if (ret) {
		goto out;
	}

	amlmmc_dtb_init(card);
	amlmmc_write_tuning_para(card, MMC_TUNING_OFFSET);

	aml_store_class = class_create(THIS_MODULE, "aml_store");
	if (IS_ERR(aml_store_class)) {
		pr_info("[%s] create aml_store_class class fail.\n", __func__);
		ret = -1;
		goto out;
	}

	ret = class_create_file(aml_store_class, &aml_version);
	if (ret) {
		pr_info("[%s] can't create aml_store_class version file.\n", __func__);
		goto out_class1;
	}
	ret = class_create_file(aml_store_class, &aml_part_table);
	if (ret) {
		pr_info("[%s] can't create aml_store_class part_table file.\n", __func__);
		goto out_class2;
	}
	ret = class_create_file(aml_store_class, &aml_store_device);
	if (ret) {
		pr_info("[%s] can't create aml_store_class store_device file.\n", __func__);
		goto out_class3;
	}

	ret = class_create_file(aml_store_class, &bootloader_offset);
	if (ret) {
		pr_info("[%s] can't create aml_store_class bl_off_bytes file.\n", __func__);
		goto out_class3;
	}

	pr_info("Exit %s %s.\n", __func__, (ret == 0) ? "OK" : "ERROR");
	return ret;

out_class3:
	class_remove_file(aml_store_class, &aml_part_table);
out_class2:
	class_remove_file(aml_store_class, &aml_version);
out_class1:
	class_destroy(aml_store_class);
out:
	kfree(pt_fmt);
	return ret;
}