• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8  *
9  * 2_by_8 routines added by Simon Munton
10  *
11  * 4_by_16 work by Carolyn J. Smith
12  *
13  * XIP support hooks by Vitaly Wool (based on code for Intel flash
14  * by Nicolas Pitre)
15  *
16  * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
17  *
18  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
19  *
20  * This code is GPL
21  */
22 
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
29 
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/of.h>
36 #include <linux/of_platform.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
41 
42 #define AMD_BOOTLOC_BUG
43 #define FORCE_WORD_WRITE 0
44 
45 #define MAX_RETRIES 3
46 
47 #define SST49LF004B		0x0060
48 #define SST49LF040B		0x0050
49 #define SST49LF008A		0x005a
50 #define AT49BV6416		0x00d6
51 
52 /*
53  * Status Register bit description. Used by flash devices that don't
54  * support DQ polling (e.g. HyperFlash)
55  */
56 #define CFI_SR_DRB		BIT(7)
57 #define CFI_SR_ESB		BIT(5)
58 #define CFI_SR_PSB		BIT(4)
59 #define CFI_SR_WBASB		BIT(3)
60 #define CFI_SR_SLSB		BIT(1)
61 
62 enum cfi_quirks {
63 	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
64 };
65 
66 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
67 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
68 #if !FORCE_WORD_WRITE
69 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
70 #endif
71 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
72 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
73 static void cfi_amdstd_sync (struct mtd_info *);
74 static int cfi_amdstd_suspend (struct mtd_info *);
75 static void cfi_amdstd_resume (struct mtd_info *);
76 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
77 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
78 					 size_t *, struct otp_info *);
79 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
80 					 size_t *, struct otp_info *);
81 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
82 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
83 					 size_t *, u_char *);
84 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
85 					 size_t *, u_char *);
86 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
87 					  size_t *, u_char *);
88 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
89 
90 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
91 				  size_t *retlen, const u_char *buf);
92 
93 static void cfi_amdstd_destroy(struct mtd_info *);
94 
95 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
96 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
97 
98 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
100 #include "fwh_lock.h"
101 
102 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
103 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
104 
105 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
106 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
107 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
108 
109 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
110 	.probe		= NULL, /* Not usable directly */
111 	.destroy	= cfi_amdstd_destroy,
112 	.name		= "cfi_cmdset_0002",
113 	.module		= THIS_MODULE
114 };
115 
116 /*
117  * Use status register to poll for Erase/write completion when DQ is not
118  * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
119  * CFI Primary Vendor-Specific Extended Query table 1.5
120  */
cfi_use_status_reg(struct cfi_private * cfi)121 static int cfi_use_status_reg(struct cfi_private *cfi)
122 {
123 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
124 	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
125 
126 	return extp && extp->MinorVersion >= '5' &&
127 		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
128 }
129 
cfi_check_err_status(struct map_info * map,struct flchip * chip,unsigned long adr)130 static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
131 				unsigned long adr)
132 {
133 	struct cfi_private *cfi = map->fldrv_priv;
134 	map_word status;
135 
136 	if (!cfi_use_status_reg(cfi))
137 		return 0;
138 
139 	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
140 			 cfi->device_type, NULL);
141 	status = map_read(map, adr);
142 
143 	/* The error bits are invalid while the chip's busy */
144 	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
145 		return 0;
146 
147 	if (map_word_bitsset(map, status, CMD(0x3a))) {
148 		unsigned long chipstatus = MERGESTATUS(status);
149 
150 		if (chipstatus & CFI_SR_ESB)
151 			pr_err("%s erase operation failed, status %lx\n",
152 			       map->name, chipstatus);
153 		if (chipstatus & CFI_SR_PSB)
154 			pr_err("%s program operation failed, status %lx\n",
155 			       map->name, chipstatus);
156 		if (chipstatus & CFI_SR_WBASB)
157 			pr_err("%s buffer program command aborted, status %lx\n",
158 			       map->name, chipstatus);
159 		if (chipstatus & CFI_SR_SLSB)
160 			pr_err("%s sector write protected, status %lx\n",
161 			       map->name, chipstatus);
162 
163 		/* Erase/Program status bits are set on the operation failure */
164 		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
165 			return 1;
166 	}
167 	return 0;
168 }
169 
170 /* #define DEBUG_CFI_FEATURES */
171 
172 
173 #ifdef DEBUG_CFI_FEATURES
cfi_tell_features(struct cfi_pri_amdstd * extp)174 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
175 {
176 	const char* erase_suspend[3] = {
177 		"Not supported", "Read only", "Read/write"
178 	};
179 	const char* top_bottom[6] = {
180 		"No WP", "8x8KiB sectors at top & bottom, no WP",
181 		"Bottom boot", "Top boot",
182 		"Uniform, Bottom WP", "Uniform, Top WP"
183 	};
184 
185 	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
186 	printk("  Address sensitive unlock: %s\n",
187 	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
188 
189 	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
190 		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
191 	else
192 		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
193 
194 	if (extp->BlkProt == 0)
195 		printk("  Block protection: Not supported\n");
196 	else
197 		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
198 
199 
200 	printk("  Temporary block unprotect: %s\n",
201 	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
202 	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
203 	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
204 	printk("  Burst mode: %s\n",
205 	       extp->BurstMode ? "Supported" : "Not supported");
206 	if (extp->PageMode == 0)
207 		printk("  Page mode: Not supported\n");
208 	else
209 		printk("  Page mode: %d word page\n", extp->PageMode << 2);
210 
211 	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
212 	       extp->VppMin >> 4, extp->VppMin & 0xf);
213 	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
214 	       extp->VppMax >> 4, extp->VppMax & 0xf);
215 
216 	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
217 		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
218 	else
219 		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
220 }
221 #endif
222 
223 #ifdef AMD_BOOTLOC_BUG
224 /* Wheee. Bring me the head of someone at AMD. */
fixup_amd_bootblock(struct mtd_info * mtd)225 static void fixup_amd_bootblock(struct mtd_info *mtd)
226 {
227 	struct map_info *map = mtd->priv;
228 	struct cfi_private *cfi = map->fldrv_priv;
229 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
230 	__u8 major = extp->MajorVersion;
231 	__u8 minor = extp->MinorVersion;
232 
233 	if (((major << 8) | minor) < 0x3131) {
234 		/* CFI version 1.0 => don't trust bootloc */
235 
236 		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
237 			map->name, cfi->mfr, cfi->id);
238 
239 		/* AFAICS all 29LV400 with a bottom boot block have a device ID
240 		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
241 		 * These were badly detected as they have the 0x80 bit set
242 		 * so treat them as a special case.
243 		 */
244 		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
245 
246 			/* Macronix added CFI to their 2nd generation
247 			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
248 			 * Fujitsu, Spansion, EON, ESI and older Macronix)
249 			 * has CFI.
250 			 *
251 			 * Therefore also check the manufacturer.
252 			 * This reduces the risk of false detection due to
253 			 * the 8-bit device ID.
254 			 */
255 			(cfi->mfr == CFI_MFR_MACRONIX)) {
256 			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
257 				" detected\n", map->name);
258 			extp->TopBottom = 2;	/* bottom boot */
259 		} else
260 		if (cfi->id & 0x80) {
261 			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
262 			extp->TopBottom = 3;	/* top boot */
263 		} else {
264 			extp->TopBottom = 2;	/* bottom boot */
265 		}
266 
267 		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
268 			" deduced %s from Device ID\n", map->name, major, minor,
269 			extp->TopBottom == 2 ? "bottom" : "top");
270 	}
271 }
272 #endif
273 
274 #if !FORCE_WORD_WRITE
fixup_use_write_buffers(struct mtd_info * mtd)275 static void fixup_use_write_buffers(struct mtd_info *mtd)
276 {
277 	struct map_info *map = mtd->priv;
278 	struct cfi_private *cfi = map->fldrv_priv;
279 	if (cfi->cfiq->BufWriteTimeoutTyp) {
280 		pr_debug("Using buffer write method\n");
281 		mtd->_write = cfi_amdstd_write_buffers;
282 	}
283 }
284 #endif /* !FORCE_WORD_WRITE */
285 
286 /* Atmel chips don't use the same PRI format as AMD chips */
fixup_convert_atmel_pri(struct mtd_info * mtd)287 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
288 {
289 	struct map_info *map = mtd->priv;
290 	struct cfi_private *cfi = map->fldrv_priv;
291 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
292 	struct cfi_pri_atmel atmel_pri;
293 
294 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
295 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
296 
297 	if (atmel_pri.Features & 0x02)
298 		extp->EraseSuspend = 2;
299 
300 	/* Some chips got it backwards... */
301 	if (cfi->id == AT49BV6416) {
302 		if (atmel_pri.BottomBoot)
303 			extp->TopBottom = 3;
304 		else
305 			extp->TopBottom = 2;
306 	} else {
307 		if (atmel_pri.BottomBoot)
308 			extp->TopBottom = 2;
309 		else
310 			extp->TopBottom = 3;
311 	}
312 
313 	/* burst write mode not supported */
314 	cfi->cfiq->BufWriteTimeoutTyp = 0;
315 	cfi->cfiq->BufWriteTimeoutMax = 0;
316 }
317 
fixup_use_secsi(struct mtd_info * mtd)318 static void fixup_use_secsi(struct mtd_info *mtd)
319 {
320 	/* Setup for chips with a secsi area */
321 	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
322 	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
323 }
324 
fixup_use_erase_chip(struct mtd_info * mtd)325 static void fixup_use_erase_chip(struct mtd_info *mtd)
326 {
327 	struct map_info *map = mtd->priv;
328 	struct cfi_private *cfi = map->fldrv_priv;
329 	if ((cfi->cfiq->NumEraseRegions == 1) &&
330 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
331 		mtd->_erase = cfi_amdstd_erase_chip;
332 	}
333 
334 }
335 
336 /*
337  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
338  * locked by default.
339  */
fixup_use_atmel_lock(struct mtd_info * mtd)340 static void fixup_use_atmel_lock(struct mtd_info *mtd)
341 {
342 	mtd->_lock = cfi_atmel_lock;
343 	mtd->_unlock = cfi_atmel_unlock;
344 	mtd->flags |= MTD_POWERUP_LOCK;
345 }
346 
fixup_old_sst_eraseregion(struct mtd_info * mtd)347 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
348 {
349 	struct map_info *map = mtd->priv;
350 	struct cfi_private *cfi = map->fldrv_priv;
351 
352 	/*
353 	 * These flashes report two separate eraseblock regions based on the
354 	 * sector_erase-size and block_erase-size, although they both operate on the
355 	 * same memory. This is not allowed according to CFI, so we just pick the
356 	 * sector_erase-size.
357 	 */
358 	cfi->cfiq->NumEraseRegions = 1;
359 }
360 
fixup_sst39vf(struct mtd_info * mtd)361 static void fixup_sst39vf(struct mtd_info *mtd)
362 {
363 	struct map_info *map = mtd->priv;
364 	struct cfi_private *cfi = map->fldrv_priv;
365 
366 	fixup_old_sst_eraseregion(mtd);
367 
368 	cfi->addr_unlock1 = 0x5555;
369 	cfi->addr_unlock2 = 0x2AAA;
370 }
371 
fixup_sst39vf_rev_b(struct mtd_info * mtd)372 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
373 {
374 	struct map_info *map = mtd->priv;
375 	struct cfi_private *cfi = map->fldrv_priv;
376 
377 	fixup_old_sst_eraseregion(mtd);
378 
379 	cfi->addr_unlock1 = 0x555;
380 	cfi->addr_unlock2 = 0x2AA;
381 
382 	cfi->sector_erase_cmd = CMD(0x50);
383 }
384 
fixup_sst38vf640x_sectorsize(struct mtd_info * mtd)385 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
386 {
387 	struct map_info *map = mtd->priv;
388 	struct cfi_private *cfi = map->fldrv_priv;
389 
390 	fixup_sst39vf_rev_b(mtd);
391 
392 	/*
393 	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
394 	 * it should report a size of 8KBytes (0x0020*256).
395 	 */
396 	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
397 	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
398 		mtd->name);
399 }
400 
fixup_s29gl064n_sectors(struct mtd_info * mtd)401 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
402 {
403 	struct map_info *map = mtd->priv;
404 	struct cfi_private *cfi = map->fldrv_priv;
405 
406 	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
407 		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
408 		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
409 			mtd->name);
410 	}
411 }
412 
fixup_s29gl032n_sectors(struct mtd_info * mtd)413 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
414 {
415 	struct map_info *map = mtd->priv;
416 	struct cfi_private *cfi = map->fldrv_priv;
417 
418 	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
419 		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
420 		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
421 			mtd->name);
422 	}
423 }
424 
fixup_s29ns512p_sectors(struct mtd_info * mtd)425 static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
426 {
427 	struct map_info *map = mtd->priv;
428 	struct cfi_private *cfi = map->fldrv_priv;
429 
430 	/*
431 	 *  S29NS512P flash uses more than 8bits to report number of sectors,
432 	 * which is not permitted by CFI.
433 	 */
434 	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
435 	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
436 		mtd->name);
437 }
438 
fixup_quirks(struct mtd_info * mtd)439 static void fixup_quirks(struct mtd_info *mtd)
440 {
441 	struct map_info *map = mtd->priv;
442 	struct cfi_private *cfi = map->fldrv_priv;
443 
444 	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
445 		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
446 }
447 
448 /* Used to fix CFI-Tables of chips without Extended Query Tables */
449 static struct cfi_fixup cfi_nopri_fixup_table[] = {
450 	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
451 	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
452 	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
453 	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
454 	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
455 	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
456 	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
457 	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
458 	{ 0, 0, NULL }
459 };
460 
461 static struct cfi_fixup cfi_fixup_table[] = {
462 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
463 #ifdef AMD_BOOTLOC_BUG
464 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
465 	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
466 	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
467 #endif
468 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
469 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
470 	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
471 	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
472 	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
473 	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
474 	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
475 	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
476 	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
477 	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
478 	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
479 	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
480 	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
481 	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
482 	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
483 #if !FORCE_WORD_WRITE
484 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
485 #endif
486 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
487 	{ 0, 0, NULL }
488 };
489 static struct cfi_fixup jedec_fixup_table[] = {
490 	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
491 	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
492 	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
493 	{ 0, 0, NULL }
494 };
495 
496 static struct cfi_fixup fixup_table[] = {
497 	/* The CFI vendor ids and the JEDEC vendor IDs appear
498 	 * to be common.  It is like the devices id's are as
499 	 * well.  This table is to pick all cases where
500 	 * we know that is the case.
501 	 */
502 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
503 	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
504 	{ 0, 0, NULL }
505 };
506 
507 
cfi_fixup_major_minor(struct cfi_private * cfi,struct cfi_pri_amdstd * extp)508 static void cfi_fixup_major_minor(struct cfi_private *cfi,
509 				  struct cfi_pri_amdstd *extp)
510 {
511 	if (cfi->mfr == CFI_MFR_SAMSUNG) {
512 		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
513 		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
514 			/*
515 			 * Samsung K8P2815UQB and K8D6x16UxM chips
516 			 * report major=0 / minor=0.
517 			 * K8D3x16UxC chips report major=3 / minor=3.
518 			 */
519 			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
520 			       " Extended Query version to 1.%c\n",
521 			       extp->MinorVersion);
522 			extp->MajorVersion = '1';
523 		}
524 	}
525 
526 	/*
527 	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
528 	 */
529 	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
530 		extp->MajorVersion = '1';
531 		extp->MinorVersion = '0';
532 	}
533 }
534 
is_m29ew(struct cfi_private * cfi)535 static int is_m29ew(struct cfi_private *cfi)
536 {
537 	if (cfi->mfr == CFI_MFR_INTEL &&
538 	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
539 	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
540 		return 1;
541 	return 0;
542 }
543 
544 /*
545  * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
546  * Some revisions of the M29EW suffer from erase suspend hang ups. In
547  * particular, it can occur when the sequence
548  * Erase Confirm -> Suspend -> Program -> Resume
549  * causes a lockup due to internal timing issues. The consequence is that the
550  * erase cannot be resumed without inserting a dummy command after programming
551  * and prior to resuming. [...] The work-around is to issue a dummy write cycle
552  * that writes an F0 command code before the RESUME command.
553  */
cfi_fixup_m29ew_erase_suspend(struct map_info * map,unsigned long adr)554 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
555 					  unsigned long adr)
556 {
557 	struct cfi_private *cfi = map->fldrv_priv;
558 	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
559 	if (is_m29ew(cfi))
560 		map_write(map, CMD(0xF0), adr);
561 }
562 
563 /*
564  * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
565  *
566  * Some revisions of the M29EW (for example, A1 and A2 step revisions)
567  * are affected by a problem that could cause a hang up when an ERASE SUSPEND
568  * command is issued after an ERASE RESUME operation without waiting for a
569  * minimum delay.  The result is that once the ERASE seems to be completed
570  * (no bits are toggling), the contents of the Flash memory block on which
571  * the erase was ongoing could be inconsistent with the expected values
572  * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
573  * values), causing a consequent failure of the ERASE operation.
574  * The occurrence of this issue could be high, especially when file system
575  * operations on the Flash are intensive.  As a result, it is recommended
576  * that a patch be applied.  Intensive file system operations can cause many
577  * calls to the garbage routine to free Flash space (also by erasing physical
578  * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
579  * commands can occur.  The problem disappears when a delay is inserted after
580  * the RESUME command by using the udelay() function available in Linux.
581  * The DELAY value must be tuned based on the customer's platform.
582  * The maximum value that fixes the problem in all cases is 500us.
583  * But, in our experience, a delay of 30 µs to 50 µs is sufficient
584  * in most cases.
585  * We have chosen 500µs because this latency is acceptable.
586  */
cfi_fixup_m29ew_delay_after_resume(struct cfi_private * cfi)587 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
588 {
589 	/*
590 	 * Resolving the Delay After Resume Issue see Micron TN-13-07
591 	 * Worst case delay must be 500µs but 30-50µs should be ok as well
592 	 */
593 	if (is_m29ew(cfi))
594 		cfi_udelay(500);
595 }
596 
cfi_cmdset_0002(struct map_info * map,int primary)597 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
598 {
599 	struct cfi_private *cfi = map->fldrv_priv;
600 	struct device_node __maybe_unused *np = map->device_node;
601 	struct mtd_info *mtd;
602 	int i;
603 
604 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
605 	if (!mtd)
606 		return NULL;
607 	mtd->priv = map;
608 	mtd->type = MTD_NORFLASH;
609 
610 	/* Fill in the default mtd operations */
611 	mtd->_erase   = cfi_amdstd_erase_varsize;
612 	mtd->_write   = cfi_amdstd_write_words;
613 	mtd->_read    = cfi_amdstd_read;
614 	mtd->_sync    = cfi_amdstd_sync;
615 	mtd->_suspend = cfi_amdstd_suspend;
616 	mtd->_resume  = cfi_amdstd_resume;
617 	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
618 	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
619 	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
620 	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
621 	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
622 	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
623 	mtd->flags   = MTD_CAP_NORFLASH;
624 	mtd->name    = map->name;
625 	mtd->writesize = 1;
626 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
627 
628 	pr_debug("MTD %s(): write buffer size %d\n", __func__,
629 			mtd->writebufsize);
630 
631 	mtd->_panic_write = cfi_amdstd_panic_write;
632 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
633 
634 	if (cfi->cfi_mode==CFI_MODE_CFI){
635 		unsigned char bootloc;
636 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
637 		struct cfi_pri_amdstd *extp;
638 
639 		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
640 		if (extp) {
641 			/*
642 			 * It's a real CFI chip, not one for which the probe
643 			 * routine faked a CFI structure.
644 			 */
645 			cfi_fixup_major_minor(cfi, extp);
646 
647 			/*
648 			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
649 			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
650 			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
651 			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
652 			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
653 			 */
654 			if (extp->MajorVersion != '1' ||
655 			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
656 				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
657 				       "version %c.%c (%#02x/%#02x).\n",
658 				       extp->MajorVersion, extp->MinorVersion,
659 				       extp->MajorVersion, extp->MinorVersion);
660 				kfree(extp);
661 				kfree(mtd);
662 				return NULL;
663 			}
664 
665 			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
666 			       extp->MajorVersion, extp->MinorVersion);
667 
668 			/* Install our own private info structure */
669 			cfi->cmdset_priv = extp;
670 
671 			/* Apply cfi device specific fixups */
672 			cfi_fixup(mtd, cfi_fixup_table);
673 
674 #ifdef DEBUG_CFI_FEATURES
675 			/* Tell the user about it in lots of lovely detail */
676 			cfi_tell_features(extp);
677 #endif
678 
679 #ifdef CONFIG_OF
680 			if (np && of_property_read_bool(
681 				    np, "use-advanced-sector-protection")
682 			    && extp->BlkProtUnprot == 8) {
683 				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
684 				mtd->_lock = cfi_ppb_lock;
685 				mtd->_unlock = cfi_ppb_unlock;
686 				mtd->_is_locked = cfi_ppb_is_locked;
687 			}
688 #endif
689 
690 			bootloc = extp->TopBottom;
691 			if ((bootloc < 2) || (bootloc > 5)) {
692 				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
693 				       "bank location (%d). Assuming bottom.\n",
694 				       map->name, bootloc);
695 				bootloc = 2;
696 			}
697 
698 			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
699 				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
700 
701 				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
702 					int j = (cfi->cfiq->NumEraseRegions-1)-i;
703 
704 					swap(cfi->cfiq->EraseRegionInfo[i],
705 					     cfi->cfiq->EraseRegionInfo[j]);
706 				}
707 			}
708 			/* Set the default CFI lock/unlock addresses */
709 			cfi->addr_unlock1 = 0x555;
710 			cfi->addr_unlock2 = 0x2aa;
711 		}
712 		cfi_fixup(mtd, cfi_nopri_fixup_table);
713 
714 		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
715 			kfree(mtd);
716 			return NULL;
717 		}
718 
719 	} /* CFI mode */
720 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
721 		/* Apply jedec specific fixups */
722 		cfi_fixup(mtd, jedec_fixup_table);
723 	}
724 	/* Apply generic fixups */
725 	cfi_fixup(mtd, fixup_table);
726 
727 	for (i=0; i< cfi->numchips; i++) {
728 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
729 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
730 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
731 		/*
732 		 * First calculate the timeout max according to timeout field
733 		 * of struct cfi_ident that probed from chip's CFI aera, if
734 		 * available. Specify a minimum of 2000us, in case the CFI data
735 		 * is wrong.
736 		 */
737 		if (cfi->cfiq->BufWriteTimeoutTyp &&
738 		    cfi->cfiq->BufWriteTimeoutMax)
739 			cfi->chips[i].buffer_write_time_max =
740 				1 << (cfi->cfiq->BufWriteTimeoutTyp +
741 				      cfi->cfiq->BufWriteTimeoutMax);
742 		else
743 			cfi->chips[i].buffer_write_time_max = 0;
744 
745 		cfi->chips[i].buffer_write_time_max =
746 			max(cfi->chips[i].buffer_write_time_max, 2000);
747 
748 		cfi->chips[i].ref_point_counter = 0;
749 		init_waitqueue_head(&(cfi->chips[i].wq));
750 	}
751 
752 	map->fldrv = &cfi_amdstd_chipdrv;
753 
754 	return cfi_amdstd_setup(mtd);
755 }
756 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
757 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
758 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
759 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
760 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
761 
cfi_amdstd_setup(struct mtd_info * mtd)762 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
763 {
764 	struct map_info *map = mtd->priv;
765 	struct cfi_private *cfi = map->fldrv_priv;
766 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
767 	unsigned long offset = 0;
768 	int i,j;
769 
770 	printk(KERN_NOTICE "number of %s chips: %d\n",
771 	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
772 	/* Select the correct geometry setup */
773 	mtd->size = devsize * cfi->numchips;
774 
775 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
776 	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
777 					  sizeof(struct mtd_erase_region_info),
778 					  GFP_KERNEL);
779 	if (!mtd->eraseregions)
780 		goto setup_err;
781 
782 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
783 		unsigned long ernum, ersize;
784 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
785 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
786 
787 		if (mtd->erasesize < ersize) {
788 			mtd->erasesize = ersize;
789 		}
790 		for (j=0; j<cfi->numchips; j++) {
791 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
792 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
793 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
794 		}
795 		offset += (ersize * ernum);
796 	}
797 	if (offset != devsize) {
798 		/* Argh */
799 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
800 		goto setup_err;
801 	}
802 
803 	__module_get(THIS_MODULE);
804 	register_reboot_notifier(&mtd->reboot_notifier);
805 	return mtd;
806 
807  setup_err:
808 	kfree(mtd->eraseregions);
809 	kfree(mtd);
810 	kfree(cfi->cmdset_priv);
811 	return NULL;
812 }
813 
814 /*
815  * Return true if the chip is ready and has the correct value.
816  *
817  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
818  * non-suspended sector) and is indicated by no toggle bits toggling.
819  *
820  * Error are indicated by toggling bits or bits held with the wrong value,
821  * or with bits toggling.
822  *
823  * Note that anything more complicated than checking if no bits are toggling
824  * (including checking DQ5 for an error status) is tricky to get working
825  * correctly and is therefore not done	(particularly with interleaved chips
826  * as each chip must be checked independently of the others).
827  */
chip_ready(struct map_info * map,struct flchip * chip,unsigned long addr,map_word * expected)828 static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
829 			       unsigned long addr, map_word *expected)
830 {
831 	struct cfi_private *cfi = map->fldrv_priv;
832 	map_word d, t;
833 	int ret;
834 
835 	if (cfi_use_status_reg(cfi)) {
836 		map_word ready = CMD(CFI_SR_DRB);
837 		/*
838 		 * For chips that support status register, check device
839 		 * ready bit
840 		 */
841 		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
842 				 cfi->device_type, NULL);
843 		t = map_read(map, addr);
844 
845 		return map_word_andequal(map, t, ready, ready);
846 	}
847 
848 	d = map_read(map, addr);
849 	t = map_read(map, addr);
850 
851 	ret = map_word_equal(map, d, t);
852 
853 	if (!ret || !expected)
854 		return ret;
855 
856 	return map_word_equal(map, t, *expected);
857 }
858 
chip_good(struct map_info * map,struct flchip * chip,unsigned long addr,map_word * expected)859 static int __xipram chip_good(struct map_info *map, struct flchip *chip,
860 			      unsigned long addr, map_word *expected)
861 {
862 	struct cfi_private *cfi = map->fldrv_priv;
863 	map_word *datum = expected;
864 
865 	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
866 		datum = NULL;
867 
868 	return chip_ready(map, chip, addr, datum);
869 }
870 
get_chip(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)871 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
872 {
873 	DECLARE_WAITQUEUE(wait, current);
874 	struct cfi_private *cfi = map->fldrv_priv;
875 	unsigned long timeo;
876 	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
877 
878  resettime:
879 	timeo = jiffies + HZ;
880  retry:
881 	switch (chip->state) {
882 
883 	case FL_STATUS:
884 		for (;;) {
885 			if (chip_ready(map, chip, adr, NULL))
886 				break;
887 
888 			if (time_after(jiffies, timeo)) {
889 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
890 				return -EIO;
891 			}
892 			mutex_unlock(&chip->mutex);
893 			cfi_udelay(1);
894 			mutex_lock(&chip->mutex);
895 			/* Someone else might have been playing with it. */
896 			goto retry;
897 		}
898 
899 	case FL_READY:
900 	case FL_CFI_QUERY:
901 	case FL_JEDEC_QUERY:
902 		return 0;
903 
904 	case FL_ERASING:
905 		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
906 		    !(mode == FL_READY || mode == FL_POINT ||
907 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
908 			goto sleep;
909 
910 		/* Do not allow suspend iff read/write to EB address */
911 		if ((adr & chip->in_progress_block_mask) ==
912 		    chip->in_progress_block_addr)
913 			goto sleep;
914 
915 		/* Erase suspend */
916 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
917 		 * commands when the erase algorithm isn't in progress. */
918 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
919 		chip->oldstate = FL_ERASING;
920 		chip->state = FL_ERASE_SUSPENDING;
921 		chip->erase_suspended = 1;
922 		for (;;) {
923 			if (chip_ready(map, chip, adr, NULL))
924 				break;
925 
926 			if (time_after(jiffies, timeo)) {
927 				/* Should have suspended the erase by now.
928 				 * Send an Erase-Resume command as either
929 				 * there was an error (so leave the erase
930 				 * routine to recover from it) or we trying to
931 				 * use the erase-in-progress sector. */
932 				put_chip(map, chip, adr);
933 				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
934 				return -EIO;
935 			}
936 
937 			mutex_unlock(&chip->mutex);
938 			cfi_udelay(1);
939 			mutex_lock(&chip->mutex);
940 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
941 			   So we can just loop here. */
942 		}
943 		chip->state = FL_READY;
944 		return 0;
945 
946 	case FL_XIP_WHILE_ERASING:
947 		if (mode != FL_READY && mode != FL_POINT &&
948 		    (!cfip || !(cfip->EraseSuspend&2)))
949 			goto sleep;
950 		chip->oldstate = chip->state;
951 		chip->state = FL_READY;
952 		return 0;
953 
954 	case FL_SHUTDOWN:
955 		/* The machine is rebooting */
956 		return -EIO;
957 
958 	case FL_POINT:
959 		/* Only if there's no operation suspended... */
960 		if (mode == FL_READY && chip->oldstate == FL_READY)
961 			return 0;
962 		fallthrough;
963 	default:
964 	sleep:
965 		set_current_state(TASK_UNINTERRUPTIBLE);
966 		add_wait_queue(&chip->wq, &wait);
967 		mutex_unlock(&chip->mutex);
968 		schedule();
969 		remove_wait_queue(&chip->wq, &wait);
970 		mutex_lock(&chip->mutex);
971 		goto resettime;
972 	}
973 }
974 
975 
put_chip(struct map_info * map,struct flchip * chip,unsigned long adr)976 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
977 {
978 	struct cfi_private *cfi = map->fldrv_priv;
979 
980 	switch(chip->oldstate) {
981 	case FL_ERASING:
982 		cfi_fixup_m29ew_erase_suspend(map,
983 			chip->in_progress_block_addr);
984 		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
985 		cfi_fixup_m29ew_delay_after_resume(cfi);
986 		chip->oldstate = FL_READY;
987 		chip->state = FL_ERASING;
988 		break;
989 
990 	case FL_XIP_WHILE_ERASING:
991 		chip->state = chip->oldstate;
992 		chip->oldstate = FL_READY;
993 		break;
994 
995 	case FL_READY:
996 	case FL_STATUS:
997 		break;
998 	default:
999 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
1000 	}
1001 	wake_up(&chip->wq);
1002 }
1003 
1004 #ifdef CONFIG_MTD_XIP
1005 
1006 /*
1007  * No interrupt what so ever can be serviced while the flash isn't in array
1008  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1009  * enclosing any code path where the flash is known not to be in array mode.
1010  * And within a XIP disabled code path, only functions marked with __xipram
1011  * may be called and nothing else (it's a good thing to inspect generated
1012  * assembly to make sure inline functions were actually inlined and that gcc
1013  * didn't emit calls to its own support functions). Also configuring MTD CFI
1014  * support to a single buswidth and a single interleave is also recommended.
1015  */
1016 
xip_disable(struct map_info * map,struct flchip * chip,unsigned long adr)1017 static void xip_disable(struct map_info *map, struct flchip *chip,
1018 			unsigned long adr)
1019 {
1020 	/* TODO: chips with no XIP use should ignore and return */
1021 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
1022 	local_irq_disable();
1023 }
1024 
xip_enable(struct map_info * map,struct flchip * chip,unsigned long adr)1025 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1026 				unsigned long adr)
1027 {
1028 	struct cfi_private *cfi = map->fldrv_priv;
1029 
1030 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1031 		map_write(map, CMD(0xf0), adr);
1032 		chip->state = FL_READY;
1033 	}
1034 	(void) map_read(map, adr);
1035 	xip_iprefetch();
1036 	local_irq_enable();
1037 }
1038 
1039 /*
1040  * When a delay is required for the flash operation to complete, the
1041  * xip_udelay() function is polling for both the given timeout and pending
1042  * (but still masked) hardware interrupts.  Whenever there is an interrupt
1043  * pending then the flash erase operation is suspended, array mode restored
1044  * and interrupts unmasked.  Task scheduling might also happen at that
1045  * point.  The CPU eventually returns from the interrupt or the call to
1046  * schedule() and the suspended flash operation is resumed for the remaining
1047  * of the delay period.
1048  *
1049  * Warning: this function _will_ fool interrupt latency tracing tools.
1050  */
1051 
xip_udelay(struct map_info * map,struct flchip * chip,unsigned long adr,int usec)1052 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1053 				unsigned long adr, int usec)
1054 {
1055 	struct cfi_private *cfi = map->fldrv_priv;
1056 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
1057 	map_word status, OK = CMD(0x80);
1058 	unsigned long suspended, start = xip_currtime();
1059 	flstate_t oldstate;
1060 
1061 	do {
1062 		cpu_relax();
1063 		if (xip_irqpending() && extp &&
1064 		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
1065 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1066 			/*
1067 			 * Let's suspend the erase operation when supported.
1068 			 * Note that we currently don't try to suspend
1069 			 * interleaved chips if there is already another
1070 			 * operation suspended (imagine what happens
1071 			 * when one chip was already done with the current
1072 			 * operation while another chip suspended it, then
1073 			 * we resume the whole thing at once).  Yes, it
1074 			 * can happen!
1075 			 */
1076 			map_write(map, CMD(0xb0), adr);
1077 			usec -= xip_elapsed_since(start);
1078 			suspended = xip_currtime();
1079 			do {
1080 				if (xip_elapsed_since(suspended) > 100000) {
1081 					/*
1082 					 * The chip doesn't want to suspend
1083 					 * after waiting for 100 msecs.
1084 					 * This is a critical error but there
1085 					 * is not much we can do here.
1086 					 */
1087 					return;
1088 				}
1089 				status = map_read(map, adr);
1090 			} while (!map_word_andequal(map, status, OK, OK));
1091 
1092 			/* Suspend succeeded */
1093 			oldstate = chip->state;
1094 			if (!map_word_bitsset(map, status, CMD(0x40)))
1095 				break;
1096 			chip->state = FL_XIP_WHILE_ERASING;
1097 			chip->erase_suspended = 1;
1098 			map_write(map, CMD(0xf0), adr);
1099 			(void) map_read(map, adr);
1100 			xip_iprefetch();
1101 			local_irq_enable();
1102 			mutex_unlock(&chip->mutex);
1103 			xip_iprefetch();
1104 			cond_resched();
1105 
1106 			/*
1107 			 * We're back.  However someone else might have
1108 			 * decided to go write to the chip if we are in
1109 			 * a suspended erase state.  If so let's wait
1110 			 * until it's done.
1111 			 */
1112 			mutex_lock(&chip->mutex);
1113 			while (chip->state != FL_XIP_WHILE_ERASING) {
1114 				DECLARE_WAITQUEUE(wait, current);
1115 				set_current_state(TASK_UNINTERRUPTIBLE);
1116 				add_wait_queue(&chip->wq, &wait);
1117 				mutex_unlock(&chip->mutex);
1118 				schedule();
1119 				remove_wait_queue(&chip->wq, &wait);
1120 				mutex_lock(&chip->mutex);
1121 			}
1122 			/* Disallow XIP again */
1123 			local_irq_disable();
1124 
1125 			/* Correct Erase Suspend Hangups for M29EW */
1126 			cfi_fixup_m29ew_erase_suspend(map, adr);
1127 			/* Resume the write or erase operation */
1128 			map_write(map, cfi->sector_erase_cmd, adr);
1129 			chip->state = oldstate;
1130 			start = xip_currtime();
1131 		} else if (usec >= 1000000/HZ) {
1132 			/*
1133 			 * Try to save on CPU power when waiting delay
1134 			 * is at least a system timer tick period.
1135 			 * No need to be extremely accurate here.
1136 			 */
1137 			xip_cpu_idle();
1138 		}
1139 		status = map_read(map, adr);
1140 	} while (!map_word_andequal(map, status, OK, OK)
1141 		 && xip_elapsed_since(start) < usec);
1142 }
1143 
1144 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
1145 
1146 /*
1147  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1148  * the flash is actively programming or erasing since we have to poll for
1149  * the operation to complete anyway.  We can't do that in a generic way with
1150  * a XIP setup so do it before the actual flash operation in this case
1151  * and stub it out from INVALIDATE_CACHE_UDELAY.
1152  */
1153 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1154 	INVALIDATE_CACHED_RANGE(map, from, size)
1155 
1156 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1157 	UDELAY(map, chip, adr, usec)
1158 
1159 /*
1160  * Extra notes:
1161  *
1162  * Activating this XIP support changes the way the code works a bit.  For
1163  * example the code to suspend the current process when concurrent access
1164  * happens is never executed because xip_udelay() will always return with the
1165  * same chip state as it was entered with.  This is why there is no care for
1166  * the presence of add_wait_queue() or schedule() calls from within a couple
1167  * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
1168  * The queueing and scheduling are always happening within xip_udelay().
1169  *
1170  * Similarly, get_chip() and put_chip() just happen to always be executed
1171  * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1172  * is in array mode, therefore never executing many cases therein and not
1173  * causing any problem with XIP.
1174  */
1175 
1176 #else
1177 
1178 #define xip_disable(map, chip, adr)
1179 #define xip_enable(map, chip, adr)
1180 #define XIP_INVAL_CACHED_RANGE(x...)
1181 
1182 #define UDELAY(map, chip, adr, usec)  \
1183 do {  \
1184 	mutex_unlock(&chip->mutex);  \
1185 	cfi_udelay(usec);  \
1186 	mutex_lock(&chip->mutex);  \
1187 } while (0)
1188 
1189 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
1190 do {  \
1191 	mutex_unlock(&chip->mutex);  \
1192 	INVALIDATE_CACHED_RANGE(map, adr, len);  \
1193 	cfi_udelay(usec);  \
1194 	mutex_lock(&chip->mutex);  \
1195 } while (0)
1196 
1197 #endif
1198 
do_read_onechip(struct map_info * map,struct flchip * chip,loff_t adr,size_t len,u_char * buf)1199 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1200 {
1201 	unsigned long cmd_addr;
1202 	struct cfi_private *cfi = map->fldrv_priv;
1203 	int ret;
1204 
1205 	adr += chip->start;
1206 
1207 	/* Ensure cmd read/writes are aligned. */
1208 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1209 
1210 	mutex_lock(&chip->mutex);
1211 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1212 	if (ret) {
1213 		mutex_unlock(&chip->mutex);
1214 		return ret;
1215 	}
1216 
1217 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1218 		map_write(map, CMD(0xf0), cmd_addr);
1219 		chip->state = FL_READY;
1220 	}
1221 
1222 	map_copy_from(map, buf, adr, len);
1223 
1224 	put_chip(map, chip, cmd_addr);
1225 
1226 	mutex_unlock(&chip->mutex);
1227 	return 0;
1228 }
1229 
1230 
cfi_amdstd_read(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1231 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1232 {
1233 	struct map_info *map = mtd->priv;
1234 	struct cfi_private *cfi = map->fldrv_priv;
1235 	unsigned long ofs;
1236 	int chipnum;
1237 	int ret = 0;
1238 
1239 	/* ofs: offset within the first chip that the first read should start */
1240 	chipnum = (from >> cfi->chipshift);
1241 	ofs = from - (chipnum <<  cfi->chipshift);
1242 
1243 	while (len) {
1244 		unsigned long thislen;
1245 
1246 		if (chipnum >= cfi->numchips)
1247 			break;
1248 
1249 		if ((len + ofs -1) >> cfi->chipshift)
1250 			thislen = (1<<cfi->chipshift) - ofs;
1251 		else
1252 			thislen = len;
1253 
1254 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1255 		if (ret)
1256 			break;
1257 
1258 		*retlen += thislen;
1259 		len -= thislen;
1260 		buf += thislen;
1261 
1262 		ofs = 0;
1263 		chipnum++;
1264 	}
1265 	return ret;
1266 }
1267 
1268 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1269 			loff_t adr, size_t len, u_char *buf, size_t grouplen);
1270 
otp_enter(struct map_info * map,struct flchip * chip,loff_t adr,size_t len)1271 static inline void otp_enter(struct map_info *map, struct flchip *chip,
1272 			     loff_t adr, size_t len)
1273 {
1274 	struct cfi_private *cfi = map->fldrv_priv;
1275 
1276 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1277 			 cfi->device_type, NULL);
1278 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1279 			 cfi->device_type, NULL);
1280 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
1281 			 cfi->device_type, NULL);
1282 
1283 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1284 }
1285 
otp_exit(struct map_info * map,struct flchip * chip,loff_t adr,size_t len)1286 static inline void otp_exit(struct map_info *map, struct flchip *chip,
1287 			    loff_t adr, size_t len)
1288 {
1289 	struct cfi_private *cfi = map->fldrv_priv;
1290 
1291 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1292 			 cfi->device_type, NULL);
1293 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1294 			 cfi->device_type, NULL);
1295 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1296 			 cfi->device_type, NULL);
1297 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1298 			 cfi->device_type, NULL);
1299 
1300 	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
1301 }
1302 
do_read_secsi_onechip(struct map_info * map,struct flchip * chip,loff_t adr,size_t len,u_char * buf,size_t grouplen)1303 static inline int do_read_secsi_onechip(struct map_info *map,
1304 					struct flchip *chip, loff_t adr,
1305 					size_t len, u_char *buf,
1306 					size_t grouplen)
1307 {
1308 	DECLARE_WAITQUEUE(wait, current);
1309 
1310  retry:
1311 	mutex_lock(&chip->mutex);
1312 
1313 	if (chip->state != FL_READY){
1314 		set_current_state(TASK_UNINTERRUPTIBLE);
1315 		add_wait_queue(&chip->wq, &wait);
1316 
1317 		mutex_unlock(&chip->mutex);
1318 
1319 		schedule();
1320 		remove_wait_queue(&chip->wq, &wait);
1321 
1322 		goto retry;
1323 	}
1324 
1325 	adr += chip->start;
1326 
1327 	chip->state = FL_READY;
1328 
1329 	otp_enter(map, chip, adr, len);
1330 	map_copy_from(map, buf, adr, len);
1331 	otp_exit(map, chip, adr, len);
1332 
1333 	wake_up(&chip->wq);
1334 	mutex_unlock(&chip->mutex);
1335 
1336 	return 0;
1337 }
1338 
cfi_amdstd_secsi_read(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1339 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1340 {
1341 	struct map_info *map = mtd->priv;
1342 	struct cfi_private *cfi = map->fldrv_priv;
1343 	unsigned long ofs;
1344 	int chipnum;
1345 	int ret = 0;
1346 
1347 	/* ofs: offset within the first chip that the first read should start */
1348 	/* 8 secsi bytes per chip */
1349 	chipnum=from>>3;
1350 	ofs=from & 7;
1351 
1352 	while (len) {
1353 		unsigned long thislen;
1354 
1355 		if (chipnum >= cfi->numchips)
1356 			break;
1357 
1358 		if ((len + ofs -1) >> 3)
1359 			thislen = (1<<3) - ofs;
1360 		else
1361 			thislen = len;
1362 
1363 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1364 					    thislen, buf, 0);
1365 		if (ret)
1366 			break;
1367 
1368 		*retlen += thislen;
1369 		len -= thislen;
1370 		buf += thislen;
1371 
1372 		ofs = 0;
1373 		chipnum++;
1374 	}
1375 	return ret;
1376 }
1377 
1378 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1379 				     unsigned long adr, map_word datum,
1380 				     int mode);
1381 
do_otp_write(struct map_info * map,struct flchip * chip,loff_t adr,size_t len,u_char * buf,size_t grouplen)1382 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1383 			size_t len, u_char *buf, size_t grouplen)
1384 {
1385 	int ret;
1386 	while (len) {
1387 		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1388 		int gap = adr - bus_ofs;
1389 		int n = min_t(int, len, map_bankwidth(map) - gap);
1390 		map_word datum = map_word_ff(map);
1391 
1392 		if (n != map_bankwidth(map)) {
1393 			/* partial write of a word, load old contents */
1394 			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1395 			datum = map_read(map, bus_ofs);
1396 			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1397 		}
1398 
1399 		datum = map_word_load_partial(map, datum, buf, gap, n);
1400 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1401 		if (ret)
1402 			return ret;
1403 
1404 		adr += n;
1405 		buf += n;
1406 		len -= n;
1407 	}
1408 
1409 	return 0;
1410 }
1411 
do_otp_lock(struct map_info * map,struct flchip * chip,loff_t adr,size_t len,u_char * buf,size_t grouplen)1412 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1413 		       size_t len, u_char *buf, size_t grouplen)
1414 {
1415 	struct cfi_private *cfi = map->fldrv_priv;
1416 	uint8_t lockreg;
1417 	unsigned long timeo;
1418 	int ret;
1419 
1420 	/* make sure area matches group boundaries */
1421 	if ((adr != 0) || (len != grouplen))
1422 		return -EINVAL;
1423 
1424 	mutex_lock(&chip->mutex);
1425 	ret = get_chip(map, chip, chip->start, FL_LOCKING);
1426 	if (ret) {
1427 		mutex_unlock(&chip->mutex);
1428 		return ret;
1429 	}
1430 	chip->state = FL_LOCKING;
1431 
1432 	/* Enter lock register command */
1433 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1434 			 cfi->device_type, NULL);
1435 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1436 			 cfi->device_type, NULL);
1437 	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1438 			 cfi->device_type, NULL);
1439 
1440 	/* read lock register */
1441 	lockreg = cfi_read_query(map, 0);
1442 
1443 	/* set bit 0 to protect extended memory block */
1444 	lockreg &= ~0x01;
1445 
1446 	/* set bit 0 to protect extended memory block */
1447 	/* write lock register */
1448 	map_write(map, CMD(0xA0), chip->start);
1449 	map_write(map, CMD(lockreg), chip->start);
1450 
1451 	/* wait for chip to become ready */
1452 	timeo = jiffies + msecs_to_jiffies(2);
1453 	for (;;) {
1454 		if (chip_ready(map, chip, adr, NULL))
1455 			break;
1456 
1457 		if (time_after(jiffies, timeo)) {
1458 			pr_err("Waiting for chip to be ready timed out.\n");
1459 			ret = -EIO;
1460 			break;
1461 		}
1462 		UDELAY(map, chip, 0, 1);
1463 	}
1464 
1465 	/* exit protection commands */
1466 	map_write(map, CMD(0x90), chip->start);
1467 	map_write(map, CMD(0x00), chip->start);
1468 
1469 	chip->state = FL_READY;
1470 	put_chip(map, chip, chip->start);
1471 	mutex_unlock(&chip->mutex);
1472 
1473 	return ret;
1474 }
1475 
cfi_amdstd_otp_walk(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf,otp_op_t action,int user_regs)1476 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1477 			       size_t *retlen, u_char *buf,
1478 			       otp_op_t action, int user_regs)
1479 {
1480 	struct map_info *map = mtd->priv;
1481 	struct cfi_private *cfi = map->fldrv_priv;
1482 	int ofs_factor = cfi->interleave * cfi->device_type;
1483 	unsigned long base;
1484 	int chipnum;
1485 	struct flchip *chip;
1486 	uint8_t otp, lockreg;
1487 	int ret;
1488 
1489 	size_t user_size, factory_size, otpsize;
1490 	loff_t user_offset, factory_offset, otpoffset;
1491 	int user_locked = 0, otplocked;
1492 
1493 	*retlen = 0;
1494 
1495 	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1496 		chip = &cfi->chips[chipnum];
1497 		factory_size = 0;
1498 		user_size = 0;
1499 
1500 		/* Micron M29EW family */
1501 		if (is_m29ew(cfi)) {
1502 			base = chip->start;
1503 
1504 			/* check whether secsi area is factory locked
1505 			   or user lockable */
1506 			mutex_lock(&chip->mutex);
1507 			ret = get_chip(map, chip, base, FL_CFI_QUERY);
1508 			if (ret) {
1509 				mutex_unlock(&chip->mutex);
1510 				return ret;
1511 			}
1512 			cfi_qry_mode_on(base, map, cfi);
1513 			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1514 			cfi_qry_mode_off(base, map, cfi);
1515 			put_chip(map, chip, base);
1516 			mutex_unlock(&chip->mutex);
1517 
1518 			if (otp & 0x80) {
1519 				/* factory locked */
1520 				factory_offset = 0;
1521 				factory_size = 0x100;
1522 			} else {
1523 				/* customer lockable */
1524 				user_offset = 0;
1525 				user_size = 0x100;
1526 
1527 				mutex_lock(&chip->mutex);
1528 				ret = get_chip(map, chip, base, FL_LOCKING);
1529 				if (ret) {
1530 					mutex_unlock(&chip->mutex);
1531 					return ret;
1532 				}
1533 
1534 				/* Enter lock register command */
1535 				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1536 						 chip->start, map, cfi,
1537 						 cfi->device_type, NULL);
1538 				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1539 						 chip->start, map, cfi,
1540 						 cfi->device_type, NULL);
1541 				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1542 						 chip->start, map, cfi,
1543 						 cfi->device_type, NULL);
1544 				/* read lock register */
1545 				lockreg = cfi_read_query(map, 0);
1546 				/* exit protection commands */
1547 				map_write(map, CMD(0x90), chip->start);
1548 				map_write(map, CMD(0x00), chip->start);
1549 				put_chip(map, chip, chip->start);
1550 				mutex_unlock(&chip->mutex);
1551 
1552 				user_locked = ((lockreg & 0x01) == 0x00);
1553 			}
1554 		}
1555 
1556 		otpsize = user_regs ? user_size : factory_size;
1557 		if (!otpsize)
1558 			continue;
1559 		otpoffset = user_regs ? user_offset : factory_offset;
1560 		otplocked = user_regs ? user_locked : 1;
1561 
1562 		if (!action) {
1563 			/* return otpinfo */
1564 			struct otp_info *otpinfo;
1565 			len -= sizeof(*otpinfo);
1566 			if (len <= 0)
1567 				return -ENOSPC;
1568 			otpinfo = (struct otp_info *)buf;
1569 			otpinfo->start = from;
1570 			otpinfo->length = otpsize;
1571 			otpinfo->locked = otplocked;
1572 			buf += sizeof(*otpinfo);
1573 			*retlen += sizeof(*otpinfo);
1574 			from += otpsize;
1575 		} else if ((from < otpsize) && (len > 0)) {
1576 			size_t size;
1577 			size = (len < otpsize - from) ? len : otpsize - from;
1578 			ret = action(map, chip, otpoffset + from, size, buf,
1579 				     otpsize);
1580 			if (ret < 0)
1581 				return ret;
1582 
1583 			buf += size;
1584 			len -= size;
1585 			*retlen += size;
1586 			from = 0;
1587 		} else {
1588 			from -= otpsize;
1589 		}
1590 	}
1591 	return 0;
1592 }
1593 
cfi_amdstd_get_fact_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)1594 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1595 					 size_t *retlen, struct otp_info *buf)
1596 {
1597 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1598 				   NULL, 0);
1599 }
1600 
cfi_amdstd_get_user_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)1601 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1602 					 size_t *retlen, struct otp_info *buf)
1603 {
1604 	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
1605 				   NULL, 1);
1606 }
1607 
cfi_amdstd_read_fact_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1608 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1609 					 size_t len, size_t *retlen,
1610 					 u_char *buf)
1611 {
1612 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1613 				   buf, do_read_secsi_onechip, 0);
1614 }
1615 
cfi_amdstd_read_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1616 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1617 					 size_t len, size_t *retlen,
1618 					 u_char *buf)
1619 {
1620 	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1621 				   buf, do_read_secsi_onechip, 1);
1622 }
1623 
cfi_amdstd_write_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)1624 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1625 					  size_t len, size_t *retlen,
1626 					  u_char *buf)
1627 {
1628 	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
1629 				   do_otp_write, 1);
1630 }
1631 
cfi_amdstd_lock_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len)1632 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1633 					 size_t len)
1634 {
1635 	size_t retlen;
1636 	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
1637 				   do_otp_lock, 1);
1638 }
1639 
do_write_oneword_once(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode,struct cfi_private * cfi)1640 static int __xipram do_write_oneword_once(struct map_info *map,
1641 					  struct flchip *chip,
1642 					  unsigned long adr, map_word datum,
1643 					  int mode, struct cfi_private *cfi)
1644 {
1645 	unsigned long timeo = jiffies + HZ;
1646 	/*
1647 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1648 	 * have a max write time of a few hundreds usec). However, we should
1649 	 * use the maximum timeout value given by the chip at probe time
1650 	 * instead.  Unfortunately, struct flchip does have a field for
1651 	 * maximum timeout, only for typical which can be far too short
1652 	 * depending of the conditions.	 The ' + 1' is to avoid having a
1653 	 * timeout of 0 jiffies if HZ is smaller than 1000.
1654 	 */
1655 	unsigned long uWriteTimeout = (HZ / 1000) + 1;
1656 	int ret = 0;
1657 
1658 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1659 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1660 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1661 	map_write(map, datum, adr);
1662 	chip->state = mode;
1663 
1664 	INVALIDATE_CACHE_UDELAY(map, chip,
1665 				adr, map_bankwidth(map),
1666 				chip->word_write_time);
1667 
1668 	/* See comment above for timeout value. */
1669 	timeo = jiffies + uWriteTimeout;
1670 	for (;;) {
1671 		if (chip->state != mode) {
1672 			/* Someone's suspended the write. Sleep */
1673 			DECLARE_WAITQUEUE(wait, current);
1674 
1675 			set_current_state(TASK_UNINTERRUPTIBLE);
1676 			add_wait_queue(&chip->wq, &wait);
1677 			mutex_unlock(&chip->mutex);
1678 			schedule();
1679 			remove_wait_queue(&chip->wq, &wait);
1680 			timeo = jiffies + (HZ / 2); /* FIXME */
1681 			mutex_lock(&chip->mutex);
1682 			continue;
1683 		}
1684 
1685 		/*
1686 		 * We check "time_after" and "!chip_good" before checking
1687 		 * "chip_good" to avoid the failure due to scheduling.
1688 		 */
1689 		if (time_after(jiffies, timeo) &&
1690 		    !chip_good(map, chip, adr, &datum)) {
1691 			xip_enable(map, chip, adr);
1692 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1693 			xip_disable(map, chip, adr);
1694 			ret = -EIO;
1695 			break;
1696 		}
1697 
1698 		if (chip_good(map, chip, adr, &datum)) {
1699 			if (cfi_check_err_status(map, chip, adr))
1700 				ret = -EIO;
1701 			break;
1702 		}
1703 
1704 		/* Latency issues. Drop the lock, wait a while and retry */
1705 		UDELAY(map, chip, adr, 1);
1706 	}
1707 
1708 	return ret;
1709 }
1710 
do_write_oneword_start(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)1711 static int __xipram do_write_oneword_start(struct map_info *map,
1712 					   struct flchip *chip,
1713 					   unsigned long adr, int mode)
1714 {
1715 	int ret;
1716 
1717 	mutex_lock(&chip->mutex);
1718 
1719 	ret = get_chip(map, chip, adr, mode);
1720 	if (ret) {
1721 		mutex_unlock(&chip->mutex);
1722 		return ret;
1723 	}
1724 
1725 	if (mode == FL_OTP_WRITE)
1726 		otp_enter(map, chip, adr, map_bankwidth(map));
1727 
1728 	return ret;
1729 }
1730 
do_write_oneword_done(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)1731 static void __xipram do_write_oneword_done(struct map_info *map,
1732 					   struct flchip *chip,
1733 					   unsigned long adr, int mode)
1734 {
1735 	if (mode == FL_OTP_WRITE)
1736 		otp_exit(map, chip, adr, map_bankwidth(map));
1737 
1738 	chip->state = FL_READY;
1739 	DISABLE_VPP(map);
1740 	put_chip(map, chip, adr);
1741 
1742 	mutex_unlock(&chip->mutex);
1743 }
1744 
do_write_oneword_retry(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode)1745 static int __xipram do_write_oneword_retry(struct map_info *map,
1746 					   struct flchip *chip,
1747 					   unsigned long adr, map_word datum,
1748 					   int mode)
1749 {
1750 	struct cfi_private *cfi = map->fldrv_priv;
1751 	int ret = 0;
1752 	map_word oldd;
1753 	int retry_cnt = 0;
1754 
1755 	/*
1756 	 * Check for a NOP for the case when the datum to write is already
1757 	 * present - it saves time and works around buggy chips that corrupt
1758 	 * data at other locations when 0xff is written to a location that
1759 	 * already contains 0xff.
1760 	 */
1761 	oldd = map_read(map, adr);
1762 	if (map_word_equal(map, oldd, datum)) {
1763 		pr_debug("MTD %s(): NOP\n", __func__);
1764 		return ret;
1765 	}
1766 
1767 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1768 	ENABLE_VPP(map);
1769 	xip_disable(map, chip, adr);
1770 
1771  retry:
1772 	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1773 	if (ret) {
1774 		/* reset on all failures. */
1775 		map_write(map, CMD(0xF0), chip->start);
1776 		/* FIXME - should have reset delay before continuing */
1777 
1778 		if (++retry_cnt <= MAX_RETRIES) {
1779 			ret = 0;
1780 			goto retry;
1781 		}
1782 	}
1783 	xip_enable(map, chip, adr);
1784 
1785 	return ret;
1786 }
1787 
do_write_oneword(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode)1788 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1789 				     unsigned long adr, map_word datum,
1790 				     int mode)
1791 {
1792 	int ret;
1793 
1794 	adr += chip->start;
1795 
1796 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1797 		 datum.x[0]);
1798 
1799 	ret = do_write_oneword_start(map, chip, adr, mode);
1800 	if (ret)
1801 		return ret;
1802 
1803 	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1804 
1805 	do_write_oneword_done(map, chip, adr, mode);
1806 
1807 	return ret;
1808 }
1809 
1810 
cfi_amdstd_write_words(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)1811 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1812 				  size_t *retlen, const u_char *buf)
1813 {
1814 	struct map_info *map = mtd->priv;
1815 	struct cfi_private *cfi = map->fldrv_priv;
1816 	int ret;
1817 	int chipnum;
1818 	unsigned long ofs, chipstart;
1819 	DECLARE_WAITQUEUE(wait, current);
1820 
1821 	chipnum = to >> cfi->chipshift;
1822 	ofs = to  - (chipnum << cfi->chipshift);
1823 	chipstart = cfi->chips[chipnum].start;
1824 
1825 	/* If it's not bus-aligned, do the first byte write */
1826 	if (ofs & (map_bankwidth(map)-1)) {
1827 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1828 		int i = ofs - bus_ofs;
1829 		int n = 0;
1830 		map_word tmp_buf;
1831 
1832  retry:
1833 		mutex_lock(&cfi->chips[chipnum].mutex);
1834 
1835 		if (cfi->chips[chipnum].state != FL_READY) {
1836 			set_current_state(TASK_UNINTERRUPTIBLE);
1837 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1838 
1839 			mutex_unlock(&cfi->chips[chipnum].mutex);
1840 
1841 			schedule();
1842 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1843 			goto retry;
1844 		}
1845 
1846 		/* Load 'tmp_buf' with old contents of flash */
1847 		tmp_buf = map_read(map, bus_ofs+chipstart);
1848 
1849 		mutex_unlock(&cfi->chips[chipnum].mutex);
1850 
1851 		/* Number of bytes to copy from buffer */
1852 		n = min_t(int, len, map_bankwidth(map)-i);
1853 
1854 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1855 
1856 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1857 				       bus_ofs, tmp_buf, FL_WRITING);
1858 		if (ret)
1859 			return ret;
1860 
1861 		ofs += n;
1862 		buf += n;
1863 		(*retlen) += n;
1864 		len -= n;
1865 
1866 		if (ofs >> cfi->chipshift) {
1867 			chipnum ++;
1868 			ofs = 0;
1869 			if (chipnum == cfi->numchips)
1870 				return 0;
1871 		}
1872 	}
1873 
1874 	/* We are now aligned, write as much as possible */
1875 	while(len >= map_bankwidth(map)) {
1876 		map_word datum;
1877 
1878 		datum = map_word_load(map, buf);
1879 
1880 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1881 				       ofs, datum, FL_WRITING);
1882 		if (ret)
1883 			return ret;
1884 
1885 		ofs += map_bankwidth(map);
1886 		buf += map_bankwidth(map);
1887 		(*retlen) += map_bankwidth(map);
1888 		len -= map_bankwidth(map);
1889 
1890 		if (ofs >> cfi->chipshift) {
1891 			chipnum ++;
1892 			ofs = 0;
1893 			if (chipnum == cfi->numchips)
1894 				return 0;
1895 			chipstart = cfi->chips[chipnum].start;
1896 		}
1897 	}
1898 
1899 	/* Write the trailing bytes if any */
1900 	if (len & (map_bankwidth(map)-1)) {
1901 		map_word tmp_buf;
1902 
1903  retry1:
1904 		mutex_lock(&cfi->chips[chipnum].mutex);
1905 
1906 		if (cfi->chips[chipnum].state != FL_READY) {
1907 			set_current_state(TASK_UNINTERRUPTIBLE);
1908 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1909 
1910 			mutex_unlock(&cfi->chips[chipnum].mutex);
1911 
1912 			schedule();
1913 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1914 			goto retry1;
1915 		}
1916 
1917 		tmp_buf = map_read(map, ofs + chipstart);
1918 
1919 		mutex_unlock(&cfi->chips[chipnum].mutex);
1920 
1921 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1922 
1923 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1924 				       ofs, tmp_buf, FL_WRITING);
1925 		if (ret)
1926 			return ret;
1927 
1928 		(*retlen) += len;
1929 	}
1930 
1931 	return 0;
1932 }
1933 
1934 #if !FORCE_WORD_WRITE
do_write_buffer_wait(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum)1935 static int __xipram do_write_buffer_wait(struct map_info *map,
1936 					 struct flchip *chip, unsigned long adr,
1937 					 map_word datum)
1938 {
1939 	unsigned long timeo;
1940 	unsigned long u_write_timeout;
1941 	int ret = 0;
1942 
1943 	/*
1944 	 * Timeout is calculated according to CFI data, if available.
1945 	 * See more comments in cfi_cmdset_0002().
1946 	 */
1947 	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1948 	timeo = jiffies + u_write_timeout;
1949 
1950 	for (;;) {
1951 		if (chip->state != FL_WRITING) {
1952 			/* Someone's suspended the write. Sleep */
1953 			DECLARE_WAITQUEUE(wait, current);
1954 
1955 			set_current_state(TASK_UNINTERRUPTIBLE);
1956 			add_wait_queue(&chip->wq, &wait);
1957 			mutex_unlock(&chip->mutex);
1958 			schedule();
1959 			remove_wait_queue(&chip->wq, &wait);
1960 			timeo = jiffies + (HZ / 2); /* FIXME */
1961 			mutex_lock(&chip->mutex);
1962 			continue;
1963 		}
1964 
1965 		/*
1966 		 * We check "time_after" and "!chip_good" before checking
1967 		 * "chip_good" to avoid the failure due to scheduling.
1968 		 */
1969 		if (time_after(jiffies, timeo) &&
1970 		    !chip_good(map, chip, adr, &datum)) {
1971 			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1972 			       __func__, adr);
1973 			ret = -EIO;
1974 			break;
1975 		}
1976 
1977 		if (chip_good(map, chip, adr, &datum)) {
1978 			if (cfi_check_err_status(map, chip, adr))
1979 				ret = -EIO;
1980 			break;
1981 		}
1982 
1983 		/* Latency issues. Drop the lock, wait a while and retry */
1984 		UDELAY(map, chip, adr, 1);
1985 	}
1986 
1987 	return ret;
1988 }
1989 
do_write_buffer_reset(struct map_info * map,struct flchip * chip,struct cfi_private * cfi)1990 static void __xipram do_write_buffer_reset(struct map_info *map,
1991 					   struct flchip *chip,
1992 					   struct cfi_private *cfi)
1993 {
1994 	/*
1995 	 * Recovery from write-buffer programming failures requires
1996 	 * the write-to-buffer-reset sequence.  Since the last part
1997 	 * of the sequence also works as a normal reset, we can run
1998 	 * the same commands regardless of why we are here.
1999 	 * See e.g.
2000 	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2001 	 */
2002 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2003 			 cfi->device_type, NULL);
2004 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2005 			 cfi->device_type, NULL);
2006 	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2007 			 cfi->device_type, NULL);
2008 
2009 	/* FIXME - should have reset delay before continuing */
2010 }
2011 
2012 /*
2013  * FIXME: interleaved mode not tested, and probably not supported!
2014  */
do_write_buffer(struct map_info * map,struct flchip * chip,unsigned long adr,const u_char * buf,int len)2015 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2016 				    unsigned long adr, const u_char *buf,
2017 				    int len)
2018 {
2019 	struct cfi_private *cfi = map->fldrv_priv;
2020 	int ret;
2021 	unsigned long cmd_adr;
2022 	int z, words;
2023 	map_word datum;
2024 
2025 	adr += chip->start;
2026 	cmd_adr = adr;
2027 
2028 	mutex_lock(&chip->mutex);
2029 	ret = get_chip(map, chip, adr, FL_WRITING);
2030 	if (ret) {
2031 		mutex_unlock(&chip->mutex);
2032 		return ret;
2033 	}
2034 
2035 	datum = map_word_load(map, buf);
2036 
2037 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2038 		 __func__, adr, datum.x[0]);
2039 
2040 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2041 	ENABLE_VPP(map);
2042 	xip_disable(map, chip, cmd_adr);
2043 
2044 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2045 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2046 
2047 	/* Write Buffer Load */
2048 	map_write(map, CMD(0x25), cmd_adr);
2049 
2050 	chip->state = FL_WRITING_TO_BUFFER;
2051 
2052 	/* Write length of data to come */
2053 	words = len / map_bankwidth(map);
2054 	map_write(map, CMD(words - 1), cmd_adr);
2055 	/* Write data */
2056 	z = 0;
2057 	while(z < words * map_bankwidth(map)) {
2058 		datum = map_word_load(map, buf);
2059 		map_write(map, datum, adr + z);
2060 
2061 		z += map_bankwidth(map);
2062 		buf += map_bankwidth(map);
2063 	}
2064 	z -= map_bankwidth(map);
2065 
2066 	adr += z;
2067 
2068 	/* Write Buffer Program Confirm: GO GO GO */
2069 	map_write(map, CMD(0x29), cmd_adr);
2070 	chip->state = FL_WRITING;
2071 
2072 	INVALIDATE_CACHE_UDELAY(map, chip,
2073 				adr, map_bankwidth(map),
2074 				chip->word_write_time);
2075 
2076 	ret = do_write_buffer_wait(map, chip, adr, datum);
2077 	if (ret)
2078 		do_write_buffer_reset(map, chip, cfi);
2079 
2080 	xip_enable(map, chip, adr);
2081 
2082 	chip->state = FL_READY;
2083 	DISABLE_VPP(map);
2084 	put_chip(map, chip, adr);
2085 	mutex_unlock(&chip->mutex);
2086 
2087 	return ret;
2088 }
2089 
2090 
cfi_amdstd_write_buffers(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)2091 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2092 				    size_t *retlen, const u_char *buf)
2093 {
2094 	struct map_info *map = mtd->priv;
2095 	struct cfi_private *cfi = map->fldrv_priv;
2096 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2097 	int ret;
2098 	int chipnum;
2099 	unsigned long ofs;
2100 
2101 	chipnum = to >> cfi->chipshift;
2102 	ofs = to  - (chipnum << cfi->chipshift);
2103 
2104 	/* If it's not bus-aligned, do the first word write */
2105 	if (ofs & (map_bankwidth(map)-1)) {
2106 		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
2107 		if (local_len > len)
2108 			local_len = len;
2109 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2110 					     local_len, retlen, buf);
2111 		if (ret)
2112 			return ret;
2113 		ofs += local_len;
2114 		buf += local_len;
2115 		len -= local_len;
2116 
2117 		if (ofs >> cfi->chipshift) {
2118 			chipnum ++;
2119 			ofs = 0;
2120 			if (chipnum == cfi->numchips)
2121 				return 0;
2122 		}
2123 	}
2124 
2125 	/* Write buffer is worth it only if more than one word to write... */
2126 	while (len >= map_bankwidth(map) * 2) {
2127 		/* We must not cross write block boundaries */
2128 		int size = wbufsize - (ofs & (wbufsize-1));
2129 
2130 		if (size > len)
2131 			size = len;
2132 		if (size % map_bankwidth(map))
2133 			size -= size % map_bankwidth(map);
2134 
2135 		ret = do_write_buffer(map, &cfi->chips[chipnum],
2136 				      ofs, buf, size);
2137 		if (ret)
2138 			return ret;
2139 
2140 		ofs += size;
2141 		buf += size;
2142 		(*retlen) += size;
2143 		len -= size;
2144 
2145 		if (ofs >> cfi->chipshift) {
2146 			chipnum ++;
2147 			ofs = 0;
2148 			if (chipnum == cfi->numchips)
2149 				return 0;
2150 		}
2151 	}
2152 
2153 	if (len) {
2154 		size_t retlen_dregs = 0;
2155 
2156 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2157 					     len, &retlen_dregs, buf);
2158 
2159 		*retlen += retlen_dregs;
2160 		return ret;
2161 	}
2162 
2163 	return 0;
2164 }
2165 #endif /* !FORCE_WORD_WRITE */
2166 
2167 /*
2168  * Wait for the flash chip to become ready to write data
2169  *
2170  * This is only called during the panic_write() path. When panic_write()
2171  * is called, the kernel is in the process of a panic, and will soon be
2172  * dead. Therefore we don't take any locks, and attempt to get access
2173  * to the chip as soon as possible.
2174  */
cfi_amdstd_panic_wait(struct map_info * map,struct flchip * chip,unsigned long adr)2175 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2176 				 unsigned long adr)
2177 {
2178 	struct cfi_private *cfi = map->fldrv_priv;
2179 	int retries = 10;
2180 	int i;
2181 
2182 	/*
2183 	 * If the driver thinks the chip is idle, and no toggle bits
2184 	 * are changing, then the chip is actually idle for sure.
2185 	 */
2186 	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2187 		return 0;
2188 
2189 	/*
2190 	 * Try several times to reset the chip and then wait for it
2191 	 * to become idle. The upper limit of a few milliseconds of
2192 	 * delay isn't a big problem: the kernel is dying anyway. It
2193 	 * is more important to save the messages.
2194 	 */
2195 	while (retries > 0) {
2196 		const unsigned long timeo = (HZ / 1000) + 1;
2197 
2198 		/* send the reset command */
2199 		map_write(map, CMD(0xF0), chip->start);
2200 
2201 		/* wait for the chip to become ready */
2202 		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2203 			if (chip_ready(map, chip, adr, NULL))
2204 				return 0;
2205 
2206 			udelay(1);
2207 		}
2208 
2209 		retries--;
2210 	}
2211 
2212 	/* the chip never became ready */
2213 	return -EBUSY;
2214 }
2215 
2216 /*
2217  * Write out one word of data to a single flash chip during a kernel panic
2218  *
2219  * This is only called during the panic_write() path. When panic_write()
2220  * is called, the kernel is in the process of a panic, and will soon be
2221  * dead. Therefore we don't take any locks, and attempt to get access
2222  * to the chip as soon as possible.
2223  *
2224  * The implementation of this routine is intentionally similar to
2225  * do_write_oneword(), in order to ease code maintenance.
2226  */
do_panic_write_oneword(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum)2227 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2228 				  unsigned long adr, map_word datum)
2229 {
2230 	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2231 	struct cfi_private *cfi = map->fldrv_priv;
2232 	int retry_cnt = 0;
2233 	map_word oldd;
2234 	int ret;
2235 	int i;
2236 
2237 	adr += chip->start;
2238 
2239 	ret = cfi_amdstd_panic_wait(map, chip, adr);
2240 	if (ret)
2241 		return ret;
2242 
2243 	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2244 			__func__, adr, datum.x[0]);
2245 
2246 	/*
2247 	 * Check for a NOP for the case when the datum to write is already
2248 	 * present - it saves time and works around buggy chips that corrupt
2249 	 * data at other locations when 0xff is written to a location that
2250 	 * already contains 0xff.
2251 	 */
2252 	oldd = map_read(map, adr);
2253 	if (map_word_equal(map, oldd, datum)) {
2254 		pr_debug("MTD %s(): NOP\n", __func__);
2255 		goto op_done;
2256 	}
2257 
2258 	ENABLE_VPP(map);
2259 
2260 retry:
2261 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2262 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2263 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2264 	map_write(map, datum, adr);
2265 
2266 	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2267 		if (chip_ready(map, chip, adr, NULL))
2268 			break;
2269 
2270 		udelay(1);
2271 	}
2272 
2273 	if (!chip_ready(map, chip, adr, &datum) ||
2274 	    cfi_check_err_status(map, chip, adr)) {
2275 		/* reset on all failures. */
2276 		map_write(map, CMD(0xF0), chip->start);
2277 		/* FIXME - should have reset delay before continuing */
2278 
2279 		if (++retry_cnt <= MAX_RETRIES)
2280 			goto retry;
2281 
2282 		ret = -EIO;
2283 	}
2284 
2285 op_done:
2286 	DISABLE_VPP(map);
2287 	return ret;
2288 }
2289 
2290 /*
2291  * Write out some data during a kernel panic
2292  *
2293  * This is used by the mtdoops driver to save the dying messages from a
2294  * kernel which has panic'd.
2295  *
2296  * This routine ignores all of the locking used throughout the rest of the
2297  * driver, in order to ensure that the data gets written out no matter what
2298  * state this driver (and the flash chip itself) was in when the kernel crashed.
2299  *
2300  * The implementation of this routine is intentionally similar to
2301  * cfi_amdstd_write_words(), in order to ease code maintenance.
2302  */
cfi_amdstd_panic_write(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)2303 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2304 				  size_t *retlen, const u_char *buf)
2305 {
2306 	struct map_info *map = mtd->priv;
2307 	struct cfi_private *cfi = map->fldrv_priv;
2308 	unsigned long ofs, chipstart;
2309 	int ret;
2310 	int chipnum;
2311 
2312 	chipnum = to >> cfi->chipshift;
2313 	ofs = to - (chipnum << cfi->chipshift);
2314 	chipstart = cfi->chips[chipnum].start;
2315 
2316 	/* If it's not bus aligned, do the first byte write */
2317 	if (ofs & (map_bankwidth(map) - 1)) {
2318 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2319 		int i = ofs - bus_ofs;
2320 		int n = 0;
2321 		map_word tmp_buf;
2322 
2323 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2324 		if (ret)
2325 			return ret;
2326 
2327 		/* Load 'tmp_buf' with old contents of flash */
2328 		tmp_buf = map_read(map, bus_ofs + chipstart);
2329 
2330 		/* Number of bytes to copy from buffer */
2331 		n = min_t(int, len, map_bankwidth(map) - i);
2332 
2333 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2334 
2335 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2336 					     bus_ofs, tmp_buf);
2337 		if (ret)
2338 			return ret;
2339 
2340 		ofs += n;
2341 		buf += n;
2342 		(*retlen) += n;
2343 		len -= n;
2344 
2345 		if (ofs >> cfi->chipshift) {
2346 			chipnum++;
2347 			ofs = 0;
2348 			if (chipnum == cfi->numchips)
2349 				return 0;
2350 		}
2351 	}
2352 
2353 	/* We are now aligned, write as much as possible */
2354 	while (len >= map_bankwidth(map)) {
2355 		map_word datum;
2356 
2357 		datum = map_word_load(map, buf);
2358 
2359 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2360 					     ofs, datum);
2361 		if (ret)
2362 			return ret;
2363 
2364 		ofs += map_bankwidth(map);
2365 		buf += map_bankwidth(map);
2366 		(*retlen) += map_bankwidth(map);
2367 		len -= map_bankwidth(map);
2368 
2369 		if (ofs >> cfi->chipshift) {
2370 			chipnum++;
2371 			ofs = 0;
2372 			if (chipnum == cfi->numchips)
2373 				return 0;
2374 
2375 			chipstart = cfi->chips[chipnum].start;
2376 		}
2377 	}
2378 
2379 	/* Write the trailing bytes if any */
2380 	if (len & (map_bankwidth(map) - 1)) {
2381 		map_word tmp_buf;
2382 
2383 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2384 		if (ret)
2385 			return ret;
2386 
2387 		tmp_buf = map_read(map, ofs + chipstart);
2388 
2389 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2390 
2391 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2392 					     ofs, tmp_buf);
2393 		if (ret)
2394 			return ret;
2395 
2396 		(*retlen) += len;
2397 	}
2398 
2399 	return 0;
2400 }
2401 
2402 
2403 /*
2404  * Handle devices with one erase region, that only implement
2405  * the chip erase command.
2406  */
do_erase_chip(struct map_info * map,struct flchip * chip)2407 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2408 {
2409 	struct cfi_private *cfi = map->fldrv_priv;
2410 	unsigned long timeo = jiffies + HZ;
2411 	unsigned long int adr;
2412 	DECLARE_WAITQUEUE(wait, current);
2413 	int ret;
2414 	int retry_cnt = 0;
2415 	map_word datum = map_word_ff(map);
2416 
2417 	adr = cfi->addr_unlock1;
2418 
2419 	mutex_lock(&chip->mutex);
2420 	ret = get_chip(map, chip, adr, FL_ERASING);
2421 	if (ret) {
2422 		mutex_unlock(&chip->mutex);
2423 		return ret;
2424 	}
2425 
2426 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2427 	       __func__, chip->start);
2428 
2429 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2430 	ENABLE_VPP(map);
2431 	xip_disable(map, chip, adr);
2432 
2433  retry:
2434 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2435 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2436 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2437 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2438 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2439 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2440 
2441 	chip->state = FL_ERASING;
2442 	chip->erase_suspended = 0;
2443 	chip->in_progress_block_addr = adr;
2444 	chip->in_progress_block_mask = ~(map->size - 1);
2445 
2446 	INVALIDATE_CACHE_UDELAY(map, chip,
2447 				adr, map->size,
2448 				chip->erase_time*500);
2449 
2450 	timeo = jiffies + (HZ*20);
2451 
2452 	for (;;) {
2453 		if (chip->state != FL_ERASING) {
2454 			/* Someone's suspended the erase. Sleep */
2455 			set_current_state(TASK_UNINTERRUPTIBLE);
2456 			add_wait_queue(&chip->wq, &wait);
2457 			mutex_unlock(&chip->mutex);
2458 			schedule();
2459 			remove_wait_queue(&chip->wq, &wait);
2460 			mutex_lock(&chip->mutex);
2461 			continue;
2462 		}
2463 		if (chip->erase_suspended) {
2464 			/* This erase was suspended and resumed.
2465 			   Adjust the timeout */
2466 			timeo = jiffies + (HZ*20); /* FIXME */
2467 			chip->erase_suspended = 0;
2468 		}
2469 
2470 		if (chip_ready(map, chip, adr, &datum)) {
2471 			if (cfi_check_err_status(map, chip, adr))
2472 				ret = -EIO;
2473 			break;
2474 		}
2475 
2476 		if (time_after(jiffies, timeo)) {
2477 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2478 			       __func__);
2479 			ret = -EIO;
2480 			break;
2481 		}
2482 
2483 		/* Latency issues. Drop the lock, wait a while and retry */
2484 		UDELAY(map, chip, adr, 1000000/HZ);
2485 	}
2486 	/* Did we succeed? */
2487 	if (ret) {
2488 		/* reset on all failures. */
2489 		map_write(map, CMD(0xF0), chip->start);
2490 		/* FIXME - should have reset delay before continuing */
2491 
2492 		if (++retry_cnt <= MAX_RETRIES) {
2493 			ret = 0;
2494 			goto retry;
2495 		}
2496 	}
2497 
2498 	chip->state = FL_READY;
2499 	xip_enable(map, chip, adr);
2500 	DISABLE_VPP(map);
2501 	put_chip(map, chip, adr);
2502 	mutex_unlock(&chip->mutex);
2503 
2504 	return ret;
2505 }
2506 
2507 
do_erase_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2508 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2509 {
2510 	struct cfi_private *cfi = map->fldrv_priv;
2511 	unsigned long timeo = jiffies + HZ;
2512 	DECLARE_WAITQUEUE(wait, current);
2513 	int ret;
2514 	int retry_cnt = 0;
2515 	map_word datum = map_word_ff(map);
2516 
2517 	adr += chip->start;
2518 
2519 	mutex_lock(&chip->mutex);
2520 	ret = get_chip(map, chip, adr, FL_ERASING);
2521 	if (ret) {
2522 		mutex_unlock(&chip->mutex);
2523 		return ret;
2524 	}
2525 
2526 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2527 		 __func__, adr);
2528 
2529 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2530 	ENABLE_VPP(map);
2531 	xip_disable(map, chip, adr);
2532 
2533  retry:
2534 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2535 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2536 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2537 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2538 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2539 	map_write(map, cfi->sector_erase_cmd, adr);
2540 
2541 	chip->state = FL_ERASING;
2542 	chip->erase_suspended = 0;
2543 	chip->in_progress_block_addr = adr;
2544 	chip->in_progress_block_mask = ~(len - 1);
2545 
2546 	INVALIDATE_CACHE_UDELAY(map, chip,
2547 				adr, len,
2548 				chip->erase_time*500);
2549 
2550 	timeo = jiffies + (HZ*20);
2551 
2552 	for (;;) {
2553 		if (chip->state != FL_ERASING) {
2554 			/* Someone's suspended the erase. Sleep */
2555 			set_current_state(TASK_UNINTERRUPTIBLE);
2556 			add_wait_queue(&chip->wq, &wait);
2557 			mutex_unlock(&chip->mutex);
2558 			schedule();
2559 			remove_wait_queue(&chip->wq, &wait);
2560 			mutex_lock(&chip->mutex);
2561 			continue;
2562 		}
2563 		if (chip->erase_suspended) {
2564 			/* This erase was suspended and resumed.
2565 			   Adjust the timeout */
2566 			timeo = jiffies + (HZ*20); /* FIXME */
2567 			chip->erase_suspended = 0;
2568 		}
2569 
2570 		if (chip_ready(map, chip, adr, &datum)) {
2571 			if (cfi_check_err_status(map, chip, adr))
2572 				ret = -EIO;
2573 			break;
2574 		}
2575 
2576 		if (time_after(jiffies, timeo)) {
2577 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2578 			       __func__);
2579 			ret = -EIO;
2580 			break;
2581 		}
2582 
2583 		/* Latency issues. Drop the lock, wait a while and retry */
2584 		UDELAY(map, chip, adr, 1000000/HZ);
2585 	}
2586 	/* Did we succeed? */
2587 	if (ret) {
2588 		/* reset on all failures. */
2589 		map_write(map, CMD(0xF0), chip->start);
2590 		/* FIXME - should have reset delay before continuing */
2591 
2592 		if (++retry_cnt <= MAX_RETRIES) {
2593 			ret = 0;
2594 			goto retry;
2595 		}
2596 	}
2597 
2598 	chip->state = FL_READY;
2599 	xip_enable(map, chip, adr);
2600 	DISABLE_VPP(map);
2601 	put_chip(map, chip, adr);
2602 	mutex_unlock(&chip->mutex);
2603 	return ret;
2604 }
2605 
2606 
cfi_amdstd_erase_varsize(struct mtd_info * mtd,struct erase_info * instr)2607 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2608 {
2609 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2610 				instr->len, NULL);
2611 }
2612 
2613 
cfi_amdstd_erase_chip(struct mtd_info * mtd,struct erase_info * instr)2614 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2615 {
2616 	struct map_info *map = mtd->priv;
2617 	struct cfi_private *cfi = map->fldrv_priv;
2618 
2619 	if (instr->addr != 0)
2620 		return -EINVAL;
2621 
2622 	if (instr->len != mtd->size)
2623 		return -EINVAL;
2624 
2625 	return do_erase_chip(map, &cfi->chips[0]);
2626 }
2627 
do_atmel_lock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2628 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2629 			 unsigned long adr, int len, void *thunk)
2630 {
2631 	struct cfi_private *cfi = map->fldrv_priv;
2632 	int ret;
2633 
2634 	mutex_lock(&chip->mutex);
2635 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2636 	if (ret)
2637 		goto out_unlock;
2638 	chip->state = FL_LOCKING;
2639 
2640 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2641 
2642 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2643 			 cfi->device_type, NULL);
2644 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2645 			 cfi->device_type, NULL);
2646 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2647 			 cfi->device_type, NULL);
2648 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2649 			 cfi->device_type, NULL);
2650 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2651 			 cfi->device_type, NULL);
2652 	map_write(map, CMD(0x40), chip->start + adr);
2653 
2654 	chip->state = FL_READY;
2655 	put_chip(map, chip, adr + chip->start);
2656 	ret = 0;
2657 
2658 out_unlock:
2659 	mutex_unlock(&chip->mutex);
2660 	return ret;
2661 }
2662 
do_atmel_unlock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2663 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2664 			   unsigned long adr, int len, void *thunk)
2665 {
2666 	struct cfi_private *cfi = map->fldrv_priv;
2667 	int ret;
2668 
2669 	mutex_lock(&chip->mutex);
2670 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2671 	if (ret)
2672 		goto out_unlock;
2673 	chip->state = FL_UNLOCKING;
2674 
2675 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2676 
2677 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2678 			 cfi->device_type, NULL);
2679 	map_write(map, CMD(0x70), adr);
2680 
2681 	chip->state = FL_READY;
2682 	put_chip(map, chip, adr + chip->start);
2683 	ret = 0;
2684 
2685 out_unlock:
2686 	mutex_unlock(&chip->mutex);
2687 	return ret;
2688 }
2689 
cfi_atmel_lock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2690 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2691 {
2692 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2693 }
2694 
cfi_atmel_unlock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2695 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2696 {
2697 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2698 }
2699 
2700 /*
2701  * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2702  */
2703 
2704 struct ppb_lock {
2705 	struct flchip *chip;
2706 	unsigned long adr;
2707 	int locked;
2708 };
2709 
2710 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2711 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2712 #define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
2713 
do_ppb_xxlock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2714 static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2715 					struct flchip *chip,
2716 					unsigned long adr, int len, void *thunk)
2717 {
2718 	struct cfi_private *cfi = map->fldrv_priv;
2719 	unsigned long timeo;
2720 	int ret;
2721 
2722 	adr += chip->start;
2723 	mutex_lock(&chip->mutex);
2724 	ret = get_chip(map, chip, adr, FL_LOCKING);
2725 	if (ret) {
2726 		mutex_unlock(&chip->mutex);
2727 		return ret;
2728 	}
2729 
2730 	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2731 
2732 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2733 			 cfi->device_type, NULL);
2734 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2735 			 cfi->device_type, NULL);
2736 	/* PPB entry command */
2737 	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2738 			 cfi->device_type, NULL);
2739 
2740 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2741 		chip->state = FL_LOCKING;
2742 		map_write(map, CMD(0xA0), adr);
2743 		map_write(map, CMD(0x00), adr);
2744 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2745 		/*
2746 		 * Unlocking of one specific sector is not supported, so we
2747 		 * have to unlock all sectors of this device instead
2748 		 */
2749 		chip->state = FL_UNLOCKING;
2750 		map_write(map, CMD(0x80), chip->start);
2751 		map_write(map, CMD(0x30), chip->start);
2752 	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2753 		chip->state = FL_JEDEC_QUERY;
2754 		/* Return locked status: 0->locked, 1->unlocked */
2755 		ret = !cfi_read_query(map, adr);
2756 	} else
2757 		BUG();
2758 
2759 	/*
2760 	 * Wait for some time as unlocking of all sectors takes quite long
2761 	 */
2762 	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2763 	for (;;) {
2764 		if (chip_ready(map, chip, adr, NULL))
2765 			break;
2766 
2767 		if (time_after(jiffies, timeo)) {
2768 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2769 			ret = -EIO;
2770 			break;
2771 		}
2772 
2773 		UDELAY(map, chip, adr, 1);
2774 	}
2775 
2776 	/* Exit BC commands */
2777 	map_write(map, CMD(0x90), chip->start);
2778 	map_write(map, CMD(0x00), chip->start);
2779 
2780 	chip->state = FL_READY;
2781 	put_chip(map, chip, adr);
2782 	mutex_unlock(&chip->mutex);
2783 
2784 	return ret;
2785 }
2786 
cfi_ppb_lock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2787 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2788 				       uint64_t len)
2789 {
2790 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2791 				DO_XXLOCK_ONEBLOCK_LOCK);
2792 }
2793 
cfi_ppb_unlock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2794 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2795 					 uint64_t len)
2796 {
2797 	struct mtd_erase_region_info *regions = mtd->eraseregions;
2798 	struct map_info *map = mtd->priv;
2799 	struct cfi_private *cfi = map->fldrv_priv;
2800 	struct ppb_lock *sect;
2801 	unsigned long adr;
2802 	loff_t offset;
2803 	uint64_t length;
2804 	int chipnum;
2805 	int i;
2806 	int sectors;
2807 	int ret;
2808 	int max_sectors;
2809 
2810 	/*
2811 	 * PPB unlocking always unlocks all sectors of the flash chip.
2812 	 * We need to re-lock all previously locked sectors. So lets
2813 	 * first check the locking status of all sectors and save
2814 	 * it for future use.
2815 	 */
2816 	max_sectors = 0;
2817 	for (i = 0; i < mtd->numeraseregions; i++)
2818 		max_sectors += regions[i].numblocks;
2819 
2820 	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2821 	if (!sect)
2822 		return -ENOMEM;
2823 
2824 	/*
2825 	 * This code to walk all sectors is a slightly modified version
2826 	 * of the cfi_varsize_frob() code.
2827 	 */
2828 	i = 0;
2829 	chipnum = 0;
2830 	adr = 0;
2831 	sectors = 0;
2832 	offset = 0;
2833 	length = mtd->size;
2834 
2835 	while (length) {
2836 		int size = regions[i].erasesize;
2837 
2838 		/*
2839 		 * Only test sectors that shall not be unlocked. The other
2840 		 * sectors shall be unlocked, so lets keep their locking
2841 		 * status at "unlocked" (locked=0) for the final re-locking.
2842 		 */
2843 		if ((offset < ofs) || (offset >= (ofs + len))) {
2844 			sect[sectors].chip = &cfi->chips[chipnum];
2845 			sect[sectors].adr = adr;
2846 			sect[sectors].locked = do_ppb_xxlock(
2847 				map, &cfi->chips[chipnum], adr, 0,
2848 				DO_XXLOCK_ONEBLOCK_GETLOCK);
2849 		}
2850 
2851 		adr += size;
2852 		offset += size;
2853 		length -= size;
2854 
2855 		if (offset == regions[i].offset + size * regions[i].numblocks)
2856 			i++;
2857 
2858 		if (adr >> cfi->chipshift) {
2859 			if (offset >= (ofs + len))
2860 				break;
2861 			adr = 0;
2862 			chipnum++;
2863 
2864 			if (chipnum >= cfi->numchips)
2865 				break;
2866 		}
2867 
2868 		sectors++;
2869 		if (sectors >= max_sectors) {
2870 			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2871 			       max_sectors);
2872 			kfree(sect);
2873 			return -EINVAL;
2874 		}
2875 	}
2876 
2877 	/* Now unlock the whole chip */
2878 	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2879 			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2880 	if (ret) {
2881 		kfree(sect);
2882 		return ret;
2883 	}
2884 
2885 	/*
2886 	 * PPB unlocking always unlocks all sectors of the flash chip.
2887 	 * We need to re-lock all previously locked sectors.
2888 	 */
2889 	for (i = 0; i < sectors; i++) {
2890 		if (sect[i].locked)
2891 			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2892 				      DO_XXLOCK_ONEBLOCK_LOCK);
2893 	}
2894 
2895 	kfree(sect);
2896 	return ret;
2897 }
2898 
cfi_ppb_is_locked(struct mtd_info * mtd,loff_t ofs,uint64_t len)2899 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2900 					    uint64_t len)
2901 {
2902 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2903 				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2904 }
2905 
cfi_amdstd_sync(struct mtd_info * mtd)2906 static void cfi_amdstd_sync (struct mtd_info *mtd)
2907 {
2908 	struct map_info *map = mtd->priv;
2909 	struct cfi_private *cfi = map->fldrv_priv;
2910 	int i;
2911 	struct flchip *chip;
2912 	int ret = 0;
2913 	DECLARE_WAITQUEUE(wait, current);
2914 
2915 	for (i=0; !ret && i<cfi->numchips; i++) {
2916 		chip = &cfi->chips[i];
2917 
2918 	retry:
2919 		mutex_lock(&chip->mutex);
2920 
2921 		switch(chip->state) {
2922 		case FL_READY:
2923 		case FL_STATUS:
2924 		case FL_CFI_QUERY:
2925 		case FL_JEDEC_QUERY:
2926 			chip->oldstate = chip->state;
2927 			chip->state = FL_SYNCING;
2928 			/* No need to wake_up() on this state change -
2929 			 * as the whole point is that nobody can do anything
2930 			 * with the chip now anyway.
2931 			 */
2932 			fallthrough;
2933 		case FL_SYNCING:
2934 			mutex_unlock(&chip->mutex);
2935 			break;
2936 
2937 		default:
2938 			/* Not an idle state */
2939 			set_current_state(TASK_UNINTERRUPTIBLE);
2940 			add_wait_queue(&chip->wq, &wait);
2941 
2942 			mutex_unlock(&chip->mutex);
2943 
2944 			schedule();
2945 
2946 			remove_wait_queue(&chip->wq, &wait);
2947 
2948 			goto retry;
2949 		}
2950 	}
2951 
2952 	/* Unlock the chips again */
2953 
2954 	for (i--; i >=0; i--) {
2955 		chip = &cfi->chips[i];
2956 
2957 		mutex_lock(&chip->mutex);
2958 
2959 		if (chip->state == FL_SYNCING) {
2960 			chip->state = chip->oldstate;
2961 			wake_up(&chip->wq);
2962 		}
2963 		mutex_unlock(&chip->mutex);
2964 	}
2965 }
2966 
2967 
cfi_amdstd_suspend(struct mtd_info * mtd)2968 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2969 {
2970 	struct map_info *map = mtd->priv;
2971 	struct cfi_private *cfi = map->fldrv_priv;
2972 	int i;
2973 	struct flchip *chip;
2974 	int ret = 0;
2975 
2976 	for (i=0; !ret && i<cfi->numchips; i++) {
2977 		chip = &cfi->chips[i];
2978 
2979 		mutex_lock(&chip->mutex);
2980 
2981 		switch(chip->state) {
2982 		case FL_READY:
2983 		case FL_STATUS:
2984 		case FL_CFI_QUERY:
2985 		case FL_JEDEC_QUERY:
2986 			chip->oldstate = chip->state;
2987 			chip->state = FL_PM_SUSPENDED;
2988 			/* No need to wake_up() on this state change -
2989 			 * as the whole point is that nobody can do anything
2990 			 * with the chip now anyway.
2991 			 */
2992 		case FL_PM_SUSPENDED:
2993 			break;
2994 
2995 		default:
2996 			ret = -EAGAIN;
2997 			break;
2998 		}
2999 		mutex_unlock(&chip->mutex);
3000 	}
3001 
3002 	/* Unlock the chips again */
3003 
3004 	if (ret) {
3005 		for (i--; i >=0; i--) {
3006 			chip = &cfi->chips[i];
3007 
3008 			mutex_lock(&chip->mutex);
3009 
3010 			if (chip->state == FL_PM_SUSPENDED) {
3011 				chip->state = chip->oldstate;
3012 				wake_up(&chip->wq);
3013 			}
3014 			mutex_unlock(&chip->mutex);
3015 		}
3016 	}
3017 
3018 	return ret;
3019 }
3020 
3021 
cfi_amdstd_resume(struct mtd_info * mtd)3022 static void cfi_amdstd_resume(struct mtd_info *mtd)
3023 {
3024 	struct map_info *map = mtd->priv;
3025 	struct cfi_private *cfi = map->fldrv_priv;
3026 	int i;
3027 	struct flchip *chip;
3028 
3029 	for (i=0; i<cfi->numchips; i++) {
3030 
3031 		chip = &cfi->chips[i];
3032 
3033 		mutex_lock(&chip->mutex);
3034 
3035 		if (chip->state == FL_PM_SUSPENDED) {
3036 			chip->state = FL_READY;
3037 			map_write(map, CMD(0xF0), chip->start);
3038 			wake_up(&chip->wq);
3039 		}
3040 		else
3041 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3042 
3043 		mutex_unlock(&chip->mutex);
3044 	}
3045 }
3046 
3047 
3048 /*
3049  * Ensure that the flash device is put back into read array mode before
3050  * unloading the driver or rebooting.  On some systems, rebooting while
3051  * the flash is in query/program/erase mode will prevent the CPU from
3052  * fetching the bootloader code, requiring a hard reset or power cycle.
3053  */
cfi_amdstd_reset(struct mtd_info * mtd)3054 static int cfi_amdstd_reset(struct mtd_info *mtd)
3055 {
3056 	struct map_info *map = mtd->priv;
3057 	struct cfi_private *cfi = map->fldrv_priv;
3058 	int i, ret;
3059 	struct flchip *chip;
3060 
3061 	for (i = 0; i < cfi->numchips; i++) {
3062 
3063 		chip = &cfi->chips[i];
3064 
3065 		mutex_lock(&chip->mutex);
3066 
3067 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3068 		if (!ret) {
3069 			map_write(map, CMD(0xF0), chip->start);
3070 			chip->state = FL_SHUTDOWN;
3071 			put_chip(map, chip, chip->start);
3072 		}
3073 
3074 		mutex_unlock(&chip->mutex);
3075 	}
3076 
3077 	return 0;
3078 }
3079 
3080 
cfi_amdstd_reboot(struct notifier_block * nb,unsigned long val,void * v)3081 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3082 			       void *v)
3083 {
3084 	struct mtd_info *mtd;
3085 
3086 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3087 	cfi_amdstd_reset(mtd);
3088 	return NOTIFY_DONE;
3089 }
3090 
3091 
cfi_amdstd_destroy(struct mtd_info * mtd)3092 static void cfi_amdstd_destroy(struct mtd_info *mtd)
3093 {
3094 	struct map_info *map = mtd->priv;
3095 	struct cfi_private *cfi = map->fldrv_priv;
3096 
3097 	cfi_amdstd_reset(mtd);
3098 	unregister_reboot_notifier(&mtd->reboot_notifier);
3099 	kfree(cfi->cmdset_priv);
3100 	kfree(cfi->cfiq);
3101 	kfree(cfi);
3102 	kfree(mtd->eraseregions);
3103 }
3104 
3105 MODULE_LICENSE("GPL");
3106 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3107 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3108 MODULE_ALIAS("cfi_cmdset_0006");
3109 MODULE_ALIAS("cfi_cmdset_0701");
3110