1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * mtd vendor storage
4 */
5
6 #include <linux/debugfs.h>
7 #include <linux/delay.h>
8 #include <linux/fs.h>
9 #include <linux/kernel.h>
10 #include <linux/list.h>
11 #include <linux/miscdevice.h>
12 #include <linux/module.h>
13 #include <linux/mtd/mtd.h>
14 #include <linux/platform_device.h>
15 #include <linux/slab.h>
16 #include <linux/soc/rockchip/rk_vendor_storage.h>
17 #include <linux/uaccess.h>
18 #include <linux/vmalloc.h>
19
20 #define MTD_VENDOR_PART_START 0
21 #define MTD_VENDOR_PART_SIZE 8
22 #define MTD_VENDOR_PART_NUM 1
23 #define MTD_VENDOR_TAG 0x524B5644
24
/*
 * Ioctl request exchanged with userspace through VENDOR_READ_IO /
 * VENDOR_WRITE_IO.  The first 8 bytes (tag, id, len) form the header;
 * `data` carries the item payload.
 */
struct rk_vendor_req {
    u32 tag;        /* must be VENDOR_REQ_TAG (0x56524551) */
    u16 id;         /* vendor item id */
    u16 len;        /* payload length in bytes */
    u8 data[1024];  /* item payload */
};
31
/* Directory entry describing one stored item inside vendor_info.data. */
struct vendor_item {
    u16 id;      /* item id, unique within the table */
    u16 offset;  /* byte offset of the payload inside vendor_info.data */
    u16 size;    /* payload size in bytes (slots are 64-byte aligned) */
    u16 flag;    /* unused by this driver */
};
38
/*
 * On-flash vendor image, written verbatim to the MTD partition.
 * Header (16 bytes) + 62 directory entries (496 bytes) = 512 bytes,
 * followed by the 3576-byte data area and an 8-byte trailer — 4 KiB total.
 * `version` and `version2` bracket the image; they must match for the
 * image to be considered intact (torn-write detection).
 */
struct vendor_info {
    u32 tag;          /* MTD_VENDOR_TAG when the image is valid */
    u32 version;      /* incremented on every write */
    u16 next_index;
    u16 item_num;     /* number of used entries in item[] */
    u16 free_offset;  /* first free byte inside data[] */
    u16 free_size;    /* bytes remaining in data[] */
    struct vendor_item item[62];
    u8 data[MTD_VENDOR_PART_SIZE * 0X200 - 0X200 - 8];
    u32 hash;
    u32 version2;     /* copy of version, written last */
};
51
/* Write cursor and cached geometry for the backing MTD partition. */
struct mtd_nand_info {
    u32 blk_offset;   /* byte offset of the erase block currently in use */
    u32 page_offset;  /* next free byte offset within that block */
    u32 version;      /* highest image version found on flash */
    u32 ops_size;     /* sizeof(vendor_info) rounded up to whole pages */
};
58
59 #ifdef CONFIG_ROCKCHIP_VENDOR_STORAGE_UPDATE_LOADER
60 #define READ_SECTOR_IO _IOW('r', 0x04, unsigned int)
61 #define WRITE_SECTOR_IO _IOW('r', 0x05, unsigned int)
62 #define END_WRITE_SECTOR_IO _IOW('r', 0x52, unsigned int)
63 #define GET_FLASH_INFO_IO _IOW('r', 0x1A, unsigned int)
64 #define GET_BAD_BLOCK_IO _IOW('r', 0x03, unsigned int)
65 #define GET_LOCK_FLAG_IO _IOW('r', 0x53, unsigned int)
66 #endif
67
68 #define VENDOR_REQ_TAG 0x56524551
69 #define VENDOR_READ_IO _IOW('v', 0x01, unsigned int)
70 #define VENDOR_WRITE_IO _IOW('v', 0x02, unsigned int)
71
72 static u8 *g_idb_buffer;
73 static struct vendor_info *g_vendor;
74 static DEFINE_MUTEX(vendor_ops_mutex);
75 static struct mtd_info *mtd;
76 static const char *vendor_mtd_name = "vnvm";
77 static struct mtd_nand_info nand_info;
78 static struct platform_device *g_pdev;
79
mtd_vendor_nand_write(void)80 static int mtd_vendor_nand_write(void)
81 {
82 size_t bytes_write;
83 int err, count = 0;
84 struct erase_info ei;
85
86 while (1) {
87 if (nand_info.page_offset >= mtd->erasesize) {
88 nand_info.blk_offset += mtd->erasesize;
89 if (nand_info.blk_offset >= mtd->size) {
90 nand_info.blk_offset = 0;
91 }
92 if (mtd_block_isbad(mtd, nand_info.blk_offset)) {
93 continue;
94 }
95
96 memset(&ei, 0, sizeof(struct erase_info));
97 ei.addr = nand_info.blk_offset;
98 ei.len = mtd->erasesize;
99 if (mtd_erase(mtd, &ei)) {
100 continue;
101 }
102
103 nand_info.page_offset = 0;
104 }
105
106 err = mtd_write(mtd, nand_info.blk_offset + nand_info.page_offset, nand_info.ops_size, &bytes_write,
107 (u8 *)g_vendor);
108 nand_info.page_offset += nand_info.ops_size;
109 if (err) {
110 continue;
111 }
112
113 count++;
114 /* write 2 copies for reliability */
115 if (count < 2) {
116 continue;
117 }
118 break;
119 }
120
121 return 0;
122 }
123
/*
 * Locate the "vnvm" MTD partition and load the newest vendor image.
 *
 * Pass 1: read the start of every good erase block; remember the block
 * holding the highest version whose tag and version/version2 agree.
 * Pass 2: inside that block, scan backwards page-group by page-group to
 * find the newest programmed copy and compute the next free write offset.
 * If no valid image exists anywhere, erase all good blocks and write a
 * fresh empty image (version 1).
 *
 * Returns 0 on success, -EIO when the MTD partition is not present.
 */
static int mtd_vendor_storage_init(void)
{
    int err, offset;
    size_t bytes_read;
    struct erase_info ei;

    mtd = get_mtd_device_nm(vendor_mtd_name);
    if (IS_ERR(mtd)) {
        return -EIO;
    }

    nand_info.page_offset = 0;
    nand_info.blk_offset = 0;
    nand_info.version = 0;
    /* round the image size up to a whole number of flash pages */
    nand_info.ops_size = (sizeof(*g_vendor) + mtd->writesize - 1) / mtd->writesize;
    nand_info.ops_size *= mtd->writesize;

    for (offset = 0; offset < mtd->size; offset += mtd->erasesize) {
        if (!mtd_block_isbad(mtd, offset)) {
            err = mtd_read(mtd, offset, sizeof(*g_vendor), &bytes_read, (u8 *)g_vendor);
            /* -EUCLEAN means corrected bitflips: the data is still usable */
            if (err && err != -EUCLEAN) {
                continue;
            }
            if (bytes_read == sizeof(*g_vendor) && g_vendor->tag == MTD_VENDOR_TAG &&
                g_vendor->version == g_vendor->version2) {
                if (g_vendor->version > nand_info.version) {
                    nand_info.version = g_vendor->version;
                    nand_info.blk_offset = offset;
                }
            }
        } else if (nand_info.blk_offset == offset) {
            /* default starting block is bad: shift it forward */
            nand_info.blk_offset += mtd->erasesize;
        }
    }

    if (nand_info.version) {
        /* scan the chosen block backwards for the newest written copy */
        for (offset = mtd->erasesize - nand_info.ops_size; offset >= 0; offset -= nand_info.ops_size) {
            err = mtd_read(mtd, nand_info.blk_offset + offset, sizeof(*g_vendor), &bytes_read, (u8 *)g_vendor);
            /* the page is not programmed */
            if (!err && bytes_read == sizeof(*g_vendor) && g_vendor->tag == 0xFFFFFFFF &&
                g_vendor->version == 0xFFFFFFFF && g_vendor->version2 == 0xFFFFFFFF) {
                continue;
            }

            /* point to the next free page */
            if (nand_info.page_offset < offset) {
                nand_info.page_offset = offset + nand_info.ops_size;
            }

            /* ecc error or io error */
            if (err && err != -EUCLEAN) {
                continue;
            }

            if (bytes_read == sizeof(*g_vendor) && g_vendor->tag == MTD_VENDOR_TAG &&
                g_vendor->version == g_vendor->version2) {
                nand_info.version = g_vendor->version;
                break;
            }
        }
    } else {
        /* no valid image on flash: format and write an empty version-1 image */
        memset((u8 *)g_vendor, 0, sizeof(*g_vendor));
        g_vendor->version = 1;
        g_vendor->tag = MTD_VENDOR_TAG;
        g_vendor->free_size = sizeof(g_vendor->data);
        g_vendor->version2 = g_vendor->version;
        for (offset = 0; offset < mtd->size; offset += mtd->erasesize) {
            if (!mtd_block_isbad(mtd, offset)) {
                memset(&ei, 0, sizeof(struct erase_info));
                /* NOTE(review): blk_offset is 0 on this path, so addr == offset */
                ei.addr = nand_info.blk_offset + offset;
                ei.len = mtd->erasesize;
                mtd_erase(mtd, &ei);
            }
        }
        mtd_vendor_nand_write();
    }

    return 0;
}
203
/*
 * Look up vendor item @id and copy at most @size bytes of its payload
 * into @pbuf.
 *
 * Returns the number of bytes copied (clamped to the stored size),
 * -1 when the id is not present, or -ENOMEM when the storage has not
 * been initialized.
 */
static int mtd_vendor_read(u32 id, void *pbuf, u32 size)
{
    const struct vendor_item *entry;
    u32 idx;

    if (!g_vendor) {
        return -ENOMEM;
    }

    for (idx = 0; idx < g_vendor->item_num; idx++) {
        entry = &g_vendor->item[idx];
        if (entry->id != id) {
            continue;
        }
        /* never hand back more than was stored */
        if (size > entry->size) {
            size = entry->size;
        }
        memcpy(pbuf, &g_vendor->data[entry->offset], size);
        return size;
    }

    return (-1);
}
223
/*
 * Create or update vendor item @id with @size bytes from @pbuf.
 *
 * Payloads live in g_vendor->data in 64-byte-aligned slots described by
 * the g_vendor->item[] table.  Growing an item beyond its current slot
 * removes it, compacts the following items downwards, then re-appends
 * the item at the end of the used area.  Every successful write bumps
 * the version pair and flushes the image to flash.
 *
 * Returns 0 on success, -1 when there is not enough free space or no
 * free directory slot, -ENOMEM when the storage is not initialized.
 */
static int mtd_vendor_write(u32 id, void *pbuf, u32 size)
{
    u32 i, j, align_size, alloc_size, item_num;
    u32 offset, next_size;
    u8 *p_data;
    struct vendor_item *item;
    struct vendor_item *next_item;

    if (!g_vendor) {
        return -ENOMEM;
    }

    p_data = g_vendor->data;
    item_num = g_vendor->item_num;
    align_size = ALIGN(size, 0x40); /* align to 64 bytes */
    for (i = 0; i < item_num; i++) {
        item = &g_vendor->item[i];
        if (item->id == id) {
            alloc_size = ALIGN(item->size, 0x40);
            if (size > alloc_size) {
                /* conservative: the old slot is reclaimed below, but we
                 * still require the full new size to be free up front */
                if (g_vendor->free_size < align_size) {
                    return -1;
                }
                /* compact all following items down over the old slot */
                offset = item->offset;
                for (j = i; j < item_num - 1; j++) {
                    item = &g_vendor->item[j];
                    next_item = &g_vendor->item[j + 1];
                    item->id = next_item->id;
                    item->size = next_item->size;
                    item->offset = offset;
                    next_size = ALIGN(next_item->size, 0x40);
                    memcpy(&p_data[offset], &p_data[next_item->offset], next_size);
                    offset += next_size;
                }
                /* re-append the grown item at the end of the used area */
                item = &g_vendor->item[j];
                item->id = id;
                item->offset = offset;
                item->size = size;
                memcpy(&p_data[item->offset], pbuf, size);
                g_vendor->free_offset = offset + align_size;
                g_vendor->free_size = sizeof(g_vendor->data) - g_vendor->free_offset;
            } else {
                /* fits in place: overwrite the existing slot */
                memcpy(&p_data[item->offset], pbuf, size);
                g_vendor->item[i].size = size;
            }
            g_vendor->version++;
            g_vendor->version2 = g_vendor->version;
            mtd_vendor_nand_write();
            return 0;
        }
    }

    /* new item: need both a free directory slot and enough data space.
     * The slot check was missing and allowed item[62] to be written,
     * overflowing the table into the data area. */
    if (g_vendor->free_size >= align_size &&
        g_vendor->item_num < ARRAY_SIZE(g_vendor->item)) {
        item = &g_vendor->item[g_vendor->item_num];
        item->id = id;
        item->offset = g_vendor->free_offset;
        item->size = size;
        g_vendor->free_offset += align_size;
        g_vendor->free_size -= align_size;
        memcpy(&g_vendor->data[item->offset], pbuf, size);
        g_vendor->item_num++;
        g_vendor->version++;
        g_vendor->version2 = g_vendor->version;
        mtd_vendor_nand_write();
        return 0;
    }
    return (-1);
}
292
/* No per-open state: the device is a stateless ioctl endpoint. */
static int vendor_storage_open(struct inode *inode, struct file *file)
{
    return 0;
}
297
/* Nothing to tear down: open() allocated no state. */
static int vendor_storage_release(struct inode *inode, struct file *file)
{
    return 0;
}
302
vendor_storage_ioctl(struct file * file,unsigned int cmd,unsigned long arg)303 static long vendor_storage_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
304 {
305 long ret = -1;
306 int size;
307 struct rk_vendor_req *v_req;
308 u32 *page_buf;
309
310 page_buf = kmalloc(0x1000, GFP_KERNEL);
311 if (!page_buf) {
312 return -ENOMEM;
313 }
314
315 mutex_lock(&vendor_ops_mutex);
316
317 v_req = (struct rk_vendor_req *)page_buf;
318
319 if (copy_from_user(page_buf, (void __user *)arg, 0x08)) {
320 ret = -EFAULT;
321 goto exit;
322 }
323
324 if (cmd == VENDOR_READ_IO) {
325 if (v_req->tag == VENDOR_REQ_TAG) {
326 size = mtd_vendor_read(v_req->id, v_req->data, v_req->len);
327 if (size != -1) {
328 v_req->len = size;
329 ret = 0;
330 }
331 if (copy_to_user((void __user *)arg, page_buf, v_req->len + 0x08)) {
332 ret = -EFAULT;
333 }
334 }
335 } else if (cmd == VENDOR_WRITE_IO) {
336 if (v_req->tag == VENDOR_REQ_TAG && (v_req->len < 0x1000 - 0x08)) {
337 if (copy_from_user(page_buf, (void __user *)arg, v_req->len + 0x08)) {
338 ret = -EFAULT;
339 goto exit;
340 }
341 ret = mtd_vendor_write(v_req->id, v_req->data, v_req->len);
342 }
343 } else {
344 ret = -EINVAL;
345 goto exit;
346 }
347 exit:
348 mutex_unlock(&vendor_ops_mutex);
349 kfree(page_buf);
350 return ret;
351 }
352
/*
 * Character-device entry points; all real work is in vendor_storage_ioctl().
 * NOTE(review): the same handler serves compat_ioctl — struct rk_vendor_req
 * appears to have an identical layout for 32-bit callers, but confirm.
 */
static const struct file_operations vendor_storage_fops = {
    .open = vendor_storage_open,
    .compat_ioctl = vendor_storage_ioctl,
    .unlocked_ioctl = vendor_storage_ioctl,
    .release = vendor_storage_release,
};
359
/* Userspace interface: /dev/vendor_storage with a dynamically assigned minor. */
static struct miscdevice vendor_storage_dev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = "vendor_storage",
    .fops = &vendor_storage_fops,
};
365
vendor_storage_probe(struct platform_device * pdev)366 static int vendor_storage_probe(struct platform_device *pdev)
367 {
368 struct device *dev = &pdev->dev;
369 int ret;
370
371 mtd = get_mtd_device_nm(vendor_mtd_name);
372 if (IS_ERR(mtd)) {
373 return -EPROBE_DEFER;
374 }
375
376 g_vendor = devm_kmalloc(dev, sizeof(*g_vendor), GFP_KERNEL | GFP_DMA);
377 if (!g_vendor) {
378 return -ENOMEM;
379 }
380
381 ret = mtd_vendor_storage_init();
382 if (ret) {
383 g_vendor = NULL;
384 return ret;
385 }
386
387 ret = misc_register(&vendor_storage_dev);
388 rk_vendor_register(mtd_vendor_read, mtd_vendor_write);
389
390 pr_err("mtd vendor storage:20200313 ret = %d\n", ret);
391
392 return ret;
393 }
394
vendor_storage_remove(struct platform_device * pdev)395 static int vendor_storage_remove(struct platform_device *pdev)
396 {
397 if (g_vendor) {
398 misc_deregister(&vendor_storage_dev);
399 g_vendor = NULL;
400 }
401
402 return 0;
403 }
404
/* Platform-device id table; matched by the device registered in module init. */
static const struct platform_device_id vendor_storage_ids[] = {{
    "mtd_vendor_storage",
}, {
}};
409
/* Platform driver glue; the device itself is created in vendor_storage_init(). */
static struct platform_driver vendor_storage_driver = {
    .probe = vendor_storage_probe,
    .remove = vendor_storage_remove,
    .driver =
        {
            .name = "mtd_vendor_storage",
        },
    .id_table = vendor_storage_ids,
};
419
vendor_storage_init(void)420 static int __init vendor_storage_init(void)
421 {
422 struct platform_device *pdev;
423 int ret;
424
425 g_idb_buffer = NULL;
426 ret = platform_driver_register(&vendor_storage_driver);
427 if (ret) {
428 return ret;
429 }
430
431 pdev = platform_device_register_simple("mtd_vendor_storage", -1, NULL, 0);
432 if (IS_ERR(pdev)) {
433 platform_driver_unregister(&vendor_storage_driver);
434 return PTR_ERR(pdev);
435 }
436 g_pdev = pdev;
437
438 return ret;
439 }
440
vendor_storage_deinit(void)441 static __exit void vendor_storage_deinit(void)
442 {
443 platform_device_unregister(g_pdev);
444 platform_driver_unregister(&vendor_storage_driver);
445 }
446
447 device_initcall_sync(vendor_storage_init);
448 module_exit(vendor_storage_deinit);
449 MODULE_LICENSE("GPL");
450