/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
#define M28F00AP30	0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90	0x00b0
#define LH28F640BFHE_PBTL90	0x00b1
#define LH28F640BFHE_PTTL70A	0x00b2
#define LH28F640BFHE_PBTL70A	0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}
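
/*
 * Note on fixup_at49bv640dx_lock() above: these AT49BV640D parts power
 * up with all sectors locked.  FeatureSupport bit 5 is the "instant
 * block lock" flag (see cfi_tell_features()), and MTD_POWERUP_LOCK
 * tells the MTD core that the device comes up locked, so it is treated
 * much like the Intel parts handled by fixup_unlock_powerup_lock()
 * below.
 */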

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
	/* Sharp LH28F640BF Family */
	if (cfi->mfr == CFI_MFR_SHARP && (
	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
		return 1;
	return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition, thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}

static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	if (!mtd->_point && map_is_linear(map)) {
		mtd->_point   = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all cases where we know
	 * that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
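
/*
 * cfi_fixup() applies matching entries in table order: cfi_fixup_table
 * runs for real CFI probes, jedec_fixup_table for JEDEC-probed parts,
 * and fixup_table runs last in either case (see cfi_cmdset_0001()
 * below).  Order matters; e.g. the Atmel PRI conversion must run before
 * any fixup that inspects or modifies FeatureSupport.
 */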

static void cfi_fixup_major_minor(struct cfi_private *cfi,
						struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
			cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
	/*
	 * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy
	 * w.r.t. erase suspend for their small erase blocks (0x8000).
	 */
	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
		return 1;
	return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
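
/*
 * Note on read_pri_intelext() above: the extended query table carries a
 * variable-length tail, so it is parsed in two (or more) passes.  Each
 * pass reads extp_size bytes; whenever a variable-length field (burst
 * read info, partition region info, ...) turns out to extend past what
 * was read, control jumps to "need_more", which enlarges extp_size and
 * rereads the whole table, bailing out at a 4096-byte sanity cap.
 */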

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
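
/*
 * Command sets 0x0003 and 0x0200 are handled identically to 0x0001,
 * hence the aliases above rather than separate implementations.
 */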

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kcalloc(mtd->numeraseregions,
				    sizeof(struct mtd_erase_region_info),
				    GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
				 GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc_array(cfi->numchips,
				       sizeof(struct flchip_shared),
				       GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
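
/*
 * Worked example for the partition fixup above (hypothetical numbers):
 * a 16 MiB chip (chipshift = 24) that reports numparts = 4 gives
 * partshift = 24 - __ffs(4) = 22, i.e. four virtual chips of 4 MiB
 * each.  All four share one flchip_shared, which is what lets
 * get_chip()/put_chip() below arbitrate write/erase contention between
 * partitions of the same physical chip.
 */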

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if the read/write targets the erase-block address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small EBs, buggy Micron chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
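
/*
 * Note on the erase-suspend handshake in chip_ready() above: CMD(0xB0)
 * is the Erase Suspend command and CMD(0x70) is Read Status Register;
 * the loop then polls for the SR.7 "ready" bit (the 0x80 pattern,
 * widened across interleaved chips by CMD()) before declaring the
 * suspend complete.  put_chip() below issues the matching CMD(0xD0)
 * Erase Resume.
 */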

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform the desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own the chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have a suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when the waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif
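
/*
 * Note: with CONFIG_MTD_XIP the wait path is xip_wait_for_operation()
 * and cache invalidation happens up front via XIP_INVAL_CACHED_RANGE();
 * without it, both jobs are done by inval_cache_and_wait_for_operation().
 * Either way, callers only ever use the INVAL_CACHE_AND_WAIT() and
 * WAIT_TIMEOUT() macros, so the two configurations stay interchangeable.
 */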

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
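
/*
 * Note on do_read_onechip() above: CMD(0xff) is the Read Array command.
 * It has to be issued before map_copy_from() whenever the chip was left
 * in status-read mode, otherwise the copy would return status register
 * bits instead of flash data.
 */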

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1621 
1622 
1623 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1624 {
1625 	struct map_info *map = mtd->priv;
1626 	struct cfi_private *cfi = map->fldrv_priv;
1627 	int ret = 0;
1628 	int chipnum;
1629 	unsigned long ofs;
1630 
1631 	chipnum = to >> cfi->chipshift;
1632 	ofs = to - (chipnum << cfi->chipshift);
1633 
1634 	/* If it's not bus-aligned, do a partial word write first */
1635 	if (ofs & (map_bankwidth(map)-1)) {
1636 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1637 		int gap = ofs - bus_ofs;
1638 		int n;
1639 		map_word datum;
1640 
1641 		n = min_t(int, len, map_bankwidth(map)-gap);
1642 		datum = map_word_ff(map);
1643 		datum = map_word_load_partial(map, datum, buf, gap, n);
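		/*
		 * Example: bank width 4, ofs 0x1001, len 7: bus_ofs = 0x1000,
		 * gap = 1, n = 3, so buf[0..2] land in bytes 1..3 of datum.
		 * The remaining bytes stay 0xff, which is a no-op when
		 * programming NOR flash (programming only clears bits).
		 */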
1644 
1645 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1646 					       bus_ofs, datum, FL_WRITING);
1647 		if (ret)
1648 			return ret;
1649 
1650 		len -= n;
1651 		ofs += n;
1652 		buf += n;
1653 		(*retlen) += n;
1654 
1655 		if (ofs >> cfi->chipshift) {
1656 			chipnum++;
1657 			ofs = 0;
1658 			if (chipnum == cfi->numchips)
1659 				return 0;
1660 		}
1661 	}
1662 
1663 	while (len >= map_bankwidth(map)) {
1664 		map_word datum = map_word_load(map, buf);
1665 
1666 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1667 				       ofs, datum, FL_WRITING);
1668 		if (ret)
1669 			return ret;
1670 
1671 		ofs += map_bankwidth(map);
1672 		buf += map_bankwidth(map);
1673 		(*retlen) += map_bankwidth(map);
1674 		len -= map_bankwidth(map);
1675 
1676 		if (ofs >> cfi->chipshift) {
1677 			chipnum++;
1678 			ofs = 0;
1679 			if (chipnum == cfi->numchips)
1680 				return 0;
1681 		}
1682 	}
1683 
1684 	if (len & (map_bankwidth(map)-1)) {
1685 		map_word datum;
1686 
1687 		datum = map_word_ff(map);
1688 		datum = map_word_load_partial(map, datum, buf, 0, len);
1689 
1690 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1691 				       ofs, datum, FL_WRITING);
1692 		if (ret)
1693 			return ret;
1694 
1695 		(*retlen) += len;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 
1702 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1703 				    unsigned long adr, const struct kvec **pvec,
1704 				    unsigned long *pvec_seek, int len)
1705 {
1706 	struct cfi_private *cfi = map->fldrv_priv;
1707 	map_word status, write_cmd, datum;
1708 	unsigned long cmd_adr;
1709 	int ret, wbufsize, word_gap, words;
1710 	const struct kvec *vec;
1711 	unsigned long vec_seek;
1712 	unsigned long initial_adr;
1713 	int initial_len = len;
1714 
1715 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1716 	adr += chip->start;
1717 	initial_adr = adr;
1718 	cmd_adr = adr & ~(wbufsize-1);
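	/*
	 * Example: MaxBufWriteSize 5 with interleave 1 gives a 32-byte
	 * write buffer (wbufsize = 1 << 5), so adr 0x1013 yields
	 * cmd_adr = 0x1013 & ~31 = 0x1000.
	 */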
1719 
1720 	/* Sharp LH28F640BF chips need the first address for the
1721 	 * Page Buffer Program command. See Table 5 of
1722 	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1723 	if (is_LH28F640BF(cfi))
1724 		cmd_adr = adr;
1725 
1726 	/* Determine the buffer-write command once; CMD() replicates it across the interleave */
1727 	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1728 
1729 	mutex_lock(&chip->mutex);
1730 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1731 	if (ret) {
1732 		mutex_unlock(&chip->mutex);
1733 		return ret;
1734 	}
1735 
1736 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1737 	ENABLE_VPP(map);
1738 	xip_disable(map, chip, cmd_adr);
1739 
1740 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1741 	   [...], the device will not accept any more Write to Buffer commands".
1742 	   So we must check here and reset those bits if they're set. Otherwise
1743 	   we're just pissing in the wind */
1744 	if (chip->state != FL_STATUS) {
1745 		map_write(map, CMD(0x70), cmd_adr);
1746 		chip->state = FL_STATUS;
1747 	}
1748 	status = map_read(map, cmd_adr);
1749 	if (map_word_bitsset(map, status, CMD(0x30))) {
1750 		xip_enable(map, chip, cmd_adr);
1751 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1752 		xip_disable(map, chip, cmd_adr);
1753 		map_write(map, CMD(0x50), cmd_adr);
1754 		map_write(map, CMD(0x70), cmd_adr);
1755 	}
1756 
1757 	chip->state = FL_WRITING_TO_BUFFER;
1758 	map_write(map, write_cmd, cmd_adr);
1759 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1760 	if (ret) {
1761 		/* Argh. Not ready for write to buffer */
1762 		map_word Xstatus = map_read(map, cmd_adr);
1763 		map_write(map, CMD(0x70), cmd_adr);
1764 		chip->state = FL_STATUS;
1765 		status = map_read(map, cmd_adr);
1766 		map_write(map, CMD(0x50), cmd_adr);
1767 		map_write(map, CMD(0x70), cmd_adr);
1768 		xip_enable(map, chip, cmd_adr);
1769 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1770 				map->name, Xstatus.x[0], status.x[0]);
1771 		goto out;
1772 	}
1773 
1774 	/* Figure out the number of words to write */
1775 	word_gap = (-adr & (map_bankwidth(map)-1));
1776 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1777 	if (!word_gap) {
1778 		words--;
1779 	} else {
1780 		word_gap = map_bankwidth(map) - word_gap;
1781 		adr -= word_gap;
1782 		datum = map_word_ff(map);
1783 	}
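	/*
	 * Worked example: bank width 4, adr 0x1002, len 10. The initial
	 * word_gap is (-0x1002) & 3 = 2 (bytes up to the next word
	 * boundary), so words = DIV_ROUND_UP(10 - 2, 4) = 2. word_gap is
	 * then flipped to 4 - 2 = 2 (offset of the data within the first
	 * word) and adr is aligned down to 0x1000. Three bus words get
	 * transferred, and the count programmed below is 2, i.e. N-1.
	 */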
1784 
1785 	/* Write the word count (the chip expects it encoded as N-1) */
1786 	map_write(map, CMD(words), cmd_adr);
1787 
1788 	/* Write data */
1789 	vec = *pvec;
1790 	vec_seek = *pvec_seek;
1791 	do {
1792 		int n = map_bankwidth(map) - word_gap;
1793 		if (n > vec->iov_len - vec_seek)
1794 			n = vec->iov_len - vec_seek;
1795 		if (n > len)
1796 			n = len;
1797 
1798 		if (!word_gap && len < map_bankwidth(map))
1799 			datum = map_word_ff(map);
1800 
1801 		datum = map_word_load_partial(map, datum,
1802 					      vec->iov_base + vec_seek,
1803 					      word_gap, n);
1804 
1805 		len -= n;
1806 		word_gap += n;
1807 		if (!len || word_gap == map_bankwidth(map)) {
1808 			map_write(map, datum, adr);
1809 			adr += map_bankwidth(map);
1810 			word_gap = 0;
1811 		}
1812 
1813 		vec_seek += n;
1814 		if (vec_seek == vec->iov_len) {
1815 			vec++;
1816 			vec_seek = 0;
1817 		}
1818 	} while (len);
1819 	*pvec = vec;
1820 	*pvec_seek = vec_seek;
1821 
1822 	/* GO GO GO */
1823 	map_write(map, CMD(0xd0), cmd_adr);
1824 	chip->state = FL_WRITING;
1825 
1826 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1827 				   initial_adr, initial_len,
1828 				   chip->buffer_write_time,
1829 				   chip->buffer_write_time_max);
1830 	if (ret) {
1831 		map_write(map, CMD(0x70), cmd_adr);
1832 		chip->state = FL_STATUS;
1833 		xip_enable(map, chip, cmd_adr);
1834 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1835 		goto out;
1836 	}
1837 
1838 	/* check for errors */
1839 	status = map_read(map, cmd_adr);
1840 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1841 		unsigned long chipstatus = MERGESTATUS(status);
1842 
1843 		/* reset status */
1844 		map_write(map, CMD(0x50), cmd_adr);
1845 		map_write(map, CMD(0x70), cmd_adr);
1846 		xip_enable(map, chip, cmd_adr);
1847 
1848 		if (chipstatus & 0x02) {
1849 			ret = -EROFS;
1850 		} else if (chipstatus & 0x08) {
1851 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1852 			ret = -EIO;
1853 		} else {
1854 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1855 			ret = -EINVAL;
1856 		}
1857 
1858 		goto out;
1859 	}
1860 
1861 	xip_enable(map, chip, cmd_adr);
1862  out:	DISABLE_VPP(map);
1863 	put_chip(map, chip, cmd_adr);
1864 	mutex_unlock(&chip->mutex);
1865 	return ret;
1866 }
1867 
1868 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1869 				unsigned long count, loff_t to, size_t *retlen)
1870 {
1871 	struct map_info *map = mtd->priv;
1872 	struct cfi_private *cfi = map->fldrv_priv;
1873 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1874 	int ret = 0;
1875 	int chipnum;
1876 	unsigned long ofs, vec_seek, i;
1877 	size_t len = 0;
1878 
1879 	for (i = 0; i < count; i++)
1880 		len += vecs[i].iov_len;
1881 
1882 	if (!len)
1883 		return 0;
1884 
1885 	chipnum = to >> cfi->chipshift;
1886 	ofs = to - (chipnum << cfi->chipshift);
1887 	vec_seek = 0;
1888 
1889 	do {
1890 		/* We must not cross write block boundaries */
1891 		int size = wbufsize - (ofs & (wbufsize-1));
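		/*
		 * Example: with wbufsize 32 and ofs 0x101c there are
		 * 32 - 28 = 4 bytes left before the write-block boundary,
		 * so at most 4 bytes go into this do_write_buffer() call.
		 */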
1892 
1893 		if (size > len)
1894 			size = len;
1895 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1896 				      ofs, &vecs, &vec_seek, size);
1897 		if (ret)
1898 			return ret;
1899 
1900 		ofs += size;
1901 		(*retlen) += size;
1902 		len -= size;
1903 
1904 		if (ofs >> cfi->chipshift) {
1905 			chipnum++;
1906 			ofs = 0;
1907 			if (chipnum == cfi->numchips)
1908 				return 0;
1909 		}
1910 
1911 		/* Be nice and reschedule with the chip in a usable state for other
1912 		   processes. */
1913 		cond_resched();
1914 
1915 	} while (len);
1916 
1917 	return 0;
1918 }
1919 
1920 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1921 				       size_t len, size_t *retlen, const u_char *buf)
1922 {
1923 	struct kvec vec;
1924 
1925 	vec.iov_base = (void *) buf;
1926 	vec.iov_len = len;
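	/* A single contiguous buffer is just a one-element kvec, so reuse
	   the vectored write path rather than duplicating it. */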
1927 
1928 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1929 }
1930 
1931 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1932 				      unsigned long adr, int len, void *thunk)
1933 {
1934 	struct cfi_private *cfi = map->fldrv_priv;
1935 	map_word status;
1936 	int retries = 3;
1937 	int ret;
1938 
1939 	adr += chip->start;
1940 
1941  retry:
1942 	mutex_lock(&chip->mutex);
1943 	ret = get_chip(map, chip, adr, FL_ERASING);
1944 	if (ret) {
1945 		mutex_unlock(&chip->mutex);
1946 		return ret;
1947 	}
1948 
1949 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1950 	ENABLE_VPP(map);
1951 	xip_disable(map, chip, adr);
1952 
1953 	/* Clear the status register first */
1954 	map_write(map, CMD(0x50), adr);
1955 
1956 	/* Now erase */
1957 	map_write(map, CMD(0x20), adr);
1958 	map_write(map, CMD(0xD0), adr);
1959 	chip->state = FL_ERASING;
1960 	chip->erase_suspended = 0;
1961 	chip->in_progress_block_addr = adr;
1962 	chip->in_progress_block_mask = ~(len - 1);
1963 
1964 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1965 				   adr, len,
1966 				   chip->erase_time,
1967 				   chip->erase_time_max);
1968 	if (ret) {
1969 		map_write(map, CMD(0x70), adr);
1970 		chip->state = FL_STATUS;
1971 		xip_enable(map, chip, adr);
1972 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1973 		goto out;
1974 	}
1975 
1976 	/* We've broken this before. It doesn't hurt to be safe */
1977 	map_write(map, CMD(0x70), adr);
1978 	chip->state = FL_STATUS;
1979 	status = map_read(map, adr);
1980 
1981 	/* check for errors */
1982 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1983 		unsigned long chipstatus = MERGESTATUS(status);
1984 
1985 		/* Reset the error bits */
1986 		map_write(map, CMD(0x50), adr);
1987 		map_write(map, CMD(0x70), adr);
1988 		xip_enable(map, chip, adr);
1989 
1990 		if ((chipstatus & 0x30) == 0x30) {
1991 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1992 			ret = -EINVAL;
1993 		} else if (chipstatus & 0x02) {
1994 			/* Protection bit set */
1995 			ret = -EROFS;
1996 		} else if (chipstatus & 0x8) {
1997 			/* Voltage */
1998 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1999 			ret = -EIO;
2000 		} else if (chipstatus & 0x20 && retries--) {
2001 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2002 			DISABLE_VPP(map);
2003 			put_chip(map, chip, adr);
2004 			mutex_unlock(&chip->mutex);
2005 			goto retry;
2006 		} else {
2007 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2008 			ret = -EIO;
2009 		}
2010 
2011 		goto out;
2012 	}
2013 
2014 	xip_enable(map, chip, adr);
2015  out:	DISABLE_VPP(map);
2016 	put_chip(map, chip, adr);
2017 	mutex_unlock(&chip->mutex);
2018 	return ret;
2019 }
2020 
2021 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2022 {
2023 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2024 				instr->len, NULL);
2025 }
2026 
2027 static void cfi_intelext_sync (struct mtd_info *mtd)
2028 {
2029 	struct map_info *map = mtd->priv;
2030 	struct cfi_private *cfi = map->fldrv_priv;
2031 	int i;
2032 	struct flchip *chip;
2033 	int ret = 0;
2034 
2035 	for (i = 0; !ret && i < cfi->numchips; i++) {
2036 		chip = &cfi->chips[i];
2037 
2038 		mutex_lock(&chip->mutex);
2039 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2040 
2041 		if (!ret) {
2042 			chip->oldstate = chip->state;
2043 			chip->state = FL_SYNCING;
2044 			/* No need to wake_up() on this state change -
2045 			 * as the whole point is that nobody can do anything
2046 			 * with the chip now anyway.
2047 			 */
2048 		}
2049 		mutex_unlock(&chip->mutex);
2050 	}
2051 
2052 	/* Unlock the chips again */
2053 
2054 	for (i--; i >= 0; i--) {
2055 		chip = &cfi->chips[i];
2056 
2057 		mutex_lock(&chip->mutex);
2058 
2059 		if (chip->state == FL_SYNCING) {
2060 			chip->state = chip->oldstate;
2061 			chip->oldstate = FL_READY;
2062 			wake_up(&chip->wq);
2063 		}
2064 		mutex_unlock(&chip->mutex);
2065 	}
2066 }
2067 
2068 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2069 						struct flchip *chip,
2070 						unsigned long adr,
2071 						int len, void *thunk)
2072 {
2073 	struct cfi_private *cfi = map->fldrv_priv;
2074 	int status, ofs_factor = cfi->interleave * cfi->device_type;
2075 
2076 	adr += chip->start;
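	/*
	 * The lock status lives at word offset 2 of the block's query
	 * space; ofs_factor scales word offsets to bus offsets. Example:
	 * interleave 2 with 16-bit devices gives ofs_factor 4, so the
	 * status is read from adr + 8.
	 */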
2077 	xip_disable(map, chip, adr+(2*ofs_factor));
2078 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
2079 	chip->state = FL_JEDEC_QUERY;
2080 	status = cfi_read_query(map, adr+(2*ofs_factor));
2081 	xip_enable(map, chip, 0);
2082 	return status;
2083 }
2084 
2085 #ifdef DEBUG_LOCK_BITS
2086 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2087 						struct flchip *chip,
2088 						unsigned long adr,
2089 						int len, void *thunk)
2090 {
2091 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2092 	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2093 	return 0;
2094 }
2095 #endif
2096 
2097 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2098 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2099 
2100 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2101 				       unsigned long adr, int len, void *thunk)
2102 {
2103 	struct cfi_private *cfi = map->fldrv_priv;
2104 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2105 	int mdelay;
2106 	int ret;
2107 
2108 	adr += chip->start;
2109 
2110 	mutex_lock(&chip->mutex);
2111 	ret = get_chip(map, chip, adr, FL_LOCKING);
2112 	if (ret) {
2113 		mutex_unlock(&chip->mutex);
2114 		return ret;
2115 	}
2116 
2117 	ENABLE_VPP(map);
2118 	xip_disable(map, chip, adr);
2119 
2120 	map_write(map, CMD(0x60), adr);
2121 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2122 		map_write(map, CMD(0x01), adr);
2123 		chip->state = FL_LOCKING;
2124 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2125 		map_write(map, CMD(0xD0), adr);
2126 		chip->state = FL_UNLOCKING;
2127 	} else
2128 		BUG();
2129 
2130 	/*
2131 	 * If Instant Individual Block Locking is supported, there is
2132 	 * no need to delay.
2133 	 */
2134 	/*
2135 	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2136 	 * let's use a maximum of 1.5 seconds (1500 ms) as the timeout.
2137 	 *
2138 	 * See "Clear Block Lock-Bits Time" on page 40 in
2139 	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2140 	 * from February 2003
2141 	 */
2142 	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2143 
2144 	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2145 	if (ret) {
2146 		map_write(map, CMD(0x70), adr);
2147 		chip->state = FL_STATUS;
2148 		xip_enable(map, chip, adr);
2149 		printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
2150 		goto out;
2151 	}
2152 
2153 	xip_enable(map, chip, adr);
2154  out:	DISABLE_VPP(map);
2155 	put_chip(map, chip, adr);
2156 	mutex_unlock(&chip->mutex);
2157 	return ret;
2158 }
2159 
2160 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2161 {
2162 	int ret;
2163 
2164 #ifdef DEBUG_LOCK_BITS
2165 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2166 	       __func__, ofs, (unsigned long long)len);
2167 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2168 		ofs, len, NULL);
2169 #endif
2170 
2171 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2172 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2173 
2174 #ifdef DEBUG_LOCK_BITS
2175 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2176 	       __func__, ret);
2177 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2178 		ofs, len, NULL);
2179 #endif
2180 
2181 	return ret;
2182 }
2183 
2184 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2185 {
2186 	int ret;
2187 
2188 #ifdef DEBUG_LOCK_BITS
2189 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2190 	       __func__, ofs, (unsigned long long)len);
2191 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2192 		ofs, len, NULL);
2193 #endif
2194 
2195 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2196 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2197 
2198 #ifdef DEBUG_LOCK_BITS
2199 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2200 	       __func__, ret);
2201 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2202 		ofs, len, NULL);
2203 #endif
2204 
2205 	return ret;
2206 }
2207 
2208 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2209 				  uint64_t len)
2210 {
2211 	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2212 				ofs, len, NULL) ? 1 : 0;
2213 }
2214 
2215 #ifdef CONFIG_MTD_OTP
2216 
2217 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2218 			u_long data_offset, u_char *buf, u_int size,
2219 			u_long prot_offset, u_int groupno, u_int groupsize);
2220 
2221 static int __xipram
2222 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2223 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2224 {
2225 	struct cfi_private *cfi = map->fldrv_priv;
2226 	int ret;
2227 
2228 	mutex_lock(&chip->mutex);
2229 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2230 	if (ret) {
2231 		mutex_unlock(&chip->mutex);
2232 		return ret;
2233 	}
2234 
2235 	/* let's ensure we're not reading back cached data from array mode */
2236 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2237 
2238 	xip_disable(map, chip, chip->start);
2239 	if (chip->state != FL_JEDEC_QUERY) {
2240 		map_write(map, CMD(0x90), chip->start);
2241 		chip->state = FL_JEDEC_QUERY;
2242 	}
2243 	map_copy_from(map, buf, chip->start + offset, size);
2244 	xip_enable(map, chip, chip->start);
2245 
2246 	/* then ensure we don't keep OTP data in the cache */
2247 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2248 
2249 	put_chip(map, chip, chip->start);
2250 	mutex_unlock(&chip->mutex);
2251 	return 0;
2252 }
2253 
2254 static int
2255 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2256 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2257 {
2258 	int ret;
2259 
2260 	while (size) {
2261 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2262 		int gap = offset - bus_ofs;
2263 		int n = min_t(int, size, map_bankwidth(map)-gap);
2264 		map_word datum = map_word_ff(map);
2265 
2266 		datum = map_word_load_partial(map, datum, buf, gap, n);
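		/*
		 * Same head/tail alignment scheme as cfi_intelext_write_words():
		 * bytes of datum not covered by buf stay 0xff, which
		 * programming leaves untouched.
		 */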
2267 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2268 		if (ret)
2269 			return ret;
2270 
2271 		offset += n;
2272 		buf += n;
2273 		size -= n;
2274 	}
2275 
2276 	return 0;
2277 }
2278 
2279 static int
2280 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2281 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2282 {
2283 	struct cfi_private *cfi = map->fldrv_priv;
2284 	map_word datum;
2285 
2286 	/* make sure area matches group boundaries */
2287 	if (size != grpsz)
2288 		return -EXDEV;
2289 
2290 	datum = map_word_ff(map);
2291 	datum = map_word_clr(map, datum, CMD(1 << grpno));
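	/*
	 * Example: locking group 0 clears bit 0 of the protection lock
	 * word, so a 16-bit datum becomes 0xfffe; programming can only
	 * clear bits, so the other groups' lock bits are left intact.
	 */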
2292 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2293 }
2294 
2295 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2296 				 size_t *retlen, u_char *buf,
2297 				 otp_op_t action, int user_regs)
2298 {
2299 	struct map_info *map = mtd->priv;
2300 	struct cfi_private *cfi = map->fldrv_priv;
2301 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2302 	struct flchip *chip;
2303 	struct cfi_intelext_otpinfo *otp;
2304 	u_long devsize, reg_prot_offset, data_offset;
2305 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2306 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2307 	int ret;
2308 
2309 	*retlen = 0;
2310 
2311 	/* Check that we actually have some OTP registers */
2312 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2313 		return -ENODATA;
2314 
2315 	/* we need real chips here, not virtual ones */
2316 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2317 	chip_step = devsize >> cfi->chipshift;
2318 	chip_num = 0;
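	/*
	 * Illustrative example: with interleave 2, devsize spans two
	 * cfi->chips entries, so chip_step is 2 and every other entry is
	 * skipped as an interleaved alias of the same physical device.
	 */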
2319 
2320 	/* Some chips have OTP located in the _top_ partition only.
2321 	   For example: Intel 28F256L18T (T means top-parameter device) */
2322 	if (cfi->mfr == CFI_MFR_INTEL) {
2323 		switch (cfi->id) {
2324 		case 0x880b:
2325 		case 0x880c:
2326 		case 0x880d:
2327 			chip_num = chip_step - 1;
2328 		}
2329 	}
2330 
2331 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2332 		chip = &cfi->chips[chip_num];
2333 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2334 
2335 		/* first OTP region */
2336 		field = 0;
2337 		reg_prot_offset = extp->ProtRegAddr;
2338 		reg_fact_groups = 1;
2339 		reg_fact_size = 1 << extp->FactProtRegSize;
2340 		reg_user_groups = 1;
2341 		reg_user_size = 1 << extp->UserProtRegSize;
2342 
2343 		while (len > 0) {
2344 			/* flash geometry fixup */
2345 			data_offset = reg_prot_offset + 1;
2346 			data_offset *= cfi->interleave * cfi->device_type;
2347 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2348 			reg_fact_size *= cfi->interleave;
2349 			reg_user_size *= cfi->interleave;
2350 
2351 			if (user_regs) {
2352 				groups = reg_user_groups;
2353 				groupsize = reg_user_size;
2354 				/* skip over factory reg area */
2355 				groupno = reg_fact_groups;
2356 				data_offset += reg_fact_groups * reg_fact_size;
2357 			} else {
2358 				groups = reg_fact_groups;
2359 				groupsize = reg_fact_size;
2360 				groupno = 0;
2361 			}
2362 
2363 			while (len > 0 && groups > 0) {
2364 				if (!action) {
2365 					/*
2366 					 * Special case: if action is NULL
2367 					 * we fill buf with otp_info records.
2368 					 */
2369 					struct otp_info *otpinfo;
2370 					map_word lockword;
2371 					if (len < sizeof(struct otp_info))
2372 						return -ENOSPC;
2373 					len -= sizeof(struct otp_info);
2374 					ret = do_otp_read(map, chip,
2375 							  reg_prot_offset,
2376 							  (u_char *)&lockword,
2377 							  map_bankwidth(map),
2378 							  0, 0,  0);
2379 					if (ret)
2380 						return ret;
2381 					otpinfo = (struct otp_info *)buf;
2382 					otpinfo->start = from;
2383 					otpinfo->length = groupsize;
2384 					otpinfo->locked =
2385 					   !map_word_bitsset(map, lockword,
2386 							     CMD(1 << groupno));
2387 					from += groupsize;
2388 					buf += sizeof(*otpinfo);
2389 					*retlen += sizeof(*otpinfo);
2390 				} else if (from >= groupsize) {
2391 					from -= groupsize;
2392 					data_offset += groupsize;
2393 				} else {
2394 					int size = groupsize;
2395 					data_offset += from;
2396 					size -= from;
2397 					from = 0;
2398 					if (size > len)
2399 						size = len;
2400 					ret = action(map, chip, data_offset,
2401 						     buf, size, reg_prot_offset,
2402 						     groupno, groupsize);
2403 					if (ret < 0)
2404 						return ret;
2405 					buf += size;
2406 					len -= size;
2407 					*retlen += size;
2408 					data_offset += size;
2409 				}
2410 				groupno++;
2411 				groups--;
2412 			}
2413 
2414 			/* next OTP region */
2415 			if (++field == extp->NumProtectionFields)
2416 				break;
2417 			reg_prot_offset = otp->ProtRegAddr;
2418 			reg_fact_groups = otp->FactGroups;
2419 			reg_fact_size = 1 << otp->FactProtRegSize;
2420 			reg_user_groups = otp->UserGroups;
2421 			reg_user_size = 1 << otp->UserProtRegSize;
2422 			otp++;
2423 		}
2424 	}
2425 
2426 	return 0;
2427 }
2428 
2429 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2430 					   size_t len, size_t *retlen,
2431 					    u_char *buf)
2432 {
2433 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2434 				     buf, do_otp_read, 0);
2435 }
2436 
2437 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2438 					   size_t len, size_t *retlen,
2439 					    u_char *buf)
2440 {
2441 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2442 				     buf, do_otp_read, 1);
2443 }
2444 
2445 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2446 					    size_t len, size_t *retlen,
2447 					     u_char *buf)
2448 {
2449 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2450 				     buf, do_otp_write, 1);
2451 }
2452 
2453 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2454 					   loff_t from, size_t len)
2455 {
2456 	size_t retlen;
2457 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2458 				     NULL, do_otp_lock, 1);
2459 }
2460 
2461 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2462 					   size_t *retlen, struct otp_info *buf)
2463 
2464 {
2465 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2466 				     NULL, 0);
2467 }
2468 
2469 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2470 					   size_t *retlen, struct otp_info *buf)
2471 {
2472 	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2473 				     NULL, 1);
2474 }
2475 
2476 #endif
2477 
2478 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2479 {
2480 	struct mtd_erase_region_info *region;
2481 	int block, status, i;
2482 	unsigned long adr;
2483 	size_t len;
2484 
2485 	for (i = 0; i < mtd->numeraseregions; i++) {
2486 		region = &mtd->eraseregions[i];
2487 		if (!region->lockmap)
2488 			continue;
2489 
2490 		for (block = 0; block < region->numblocks; block++){
2491 			len = region->erasesize;
2492 			adr = region->offset + block * len;
2493 
2494 			status = cfi_varsize_frob(mtd,
2495 					do_getlockstatus_oneblock, adr, len, NULL);
2496 			if (status)
2497 				set_bit(block, region->lockmap);
2498 			else
2499 				clear_bit(block, region->lockmap);
2500 		}
2501 	}
2502 }
2503 
2504 static int cfi_intelext_suspend(struct mtd_info *mtd)
2505 {
2506 	struct map_info *map = mtd->priv;
2507 	struct cfi_private *cfi = map->fldrv_priv;
2508 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2509 	int i;
2510 	struct flchip *chip;
2511 	int ret = 0;
2512 
2513 	if ((mtd->flags & MTD_POWERUP_LOCK)
2514 	    && extp && (extp->FeatureSupport & (1 << 5)))
2515 		cfi_intelext_save_locks(mtd);
2516 
2517 	for (i = 0; !ret && i < cfi->numchips; i++) {
2518 		chip = &cfi->chips[i];
2519 
2520 		mutex_lock(&chip->mutex);
2521 
2522 		switch (chip->state) {
2523 		case FL_READY:
2524 		case FL_STATUS:
2525 		case FL_CFI_QUERY:
2526 		case FL_JEDEC_QUERY:
2527 			if (chip->oldstate == FL_READY) {
2528 				/* place the chip in a known state before suspend */
2529 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2530 				chip->oldstate = chip->state;
2531 				chip->state = FL_PM_SUSPENDED;
2532 				/* No need to wake_up() on this state change -
2533 				 * as the whole point is that nobody can do anything
2534 				 * with the chip now anyway.
2535 				 */
2536 			} else {
2537 				/* There seems to be an operation pending. We must wait for it. */
2538 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2539 				ret = -EAGAIN;
2540 			}
2541 			break;
2542 		default:
2543 			/* Should we actually wait? Once upon a time these routines weren't
2544 			   allowed to. Or should we return -EAGAIN, because the upper layers
2545 			   ought to have already shut down anything which was using the device
2546 			   anyway? The latter for now. */
2547 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2548 			ret = -EAGAIN;
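			/* fall through */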
2549 		case FL_PM_SUSPENDED:
2550 			break;
2551 		}
2552 		mutex_unlock(&chip->mutex);
2553 	}
2554 
2555 	/* Unlock the chips again */
2556 
2557 	if (ret) {
2558 		for (i--; i >= 0; i--) {
2559 			chip = &cfi->chips[i];
2560 
2561 			mutex_lock(&chip->mutex);
2562 
2563 			if (chip->state == FL_PM_SUSPENDED) {
2564 				/* No need to force it into a known state here,
2565 				   because we're returning failure, and it didn't
2566 				   get power cycled */
2567 				chip->state = chip->oldstate;
2568 				chip->oldstate = FL_READY;
2569 				wake_up(&chip->wq);
2570 			}
2571 			mutex_unlock(&chip->mutex);
2572 		}
2573 	}
2574 
2575 	return ret;
2576 }
2577 
2578 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2579 {
2580 	struct mtd_erase_region_info *region;
2581 	int block, i;
2582 	unsigned long adr;
2583 	size_t len;
2584 
2585 	for (i = 0; i < mtd->numeraseregions; i++) {
2586 		region = &mtd->eraseregions[i];
2587 		if (!region->lockmap)
2588 			continue;
2589 
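		/*
		 * A clear lockmap bit means the block was unlocked before
		 * suspend; power-up locking has re-locked every block, so
		 * unlock those blocks again.
		 */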
2590 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2591 			len = region->erasesize;
2592 			adr = region->offset + block * len;
2593 			cfi_intelext_unlock(mtd, adr, len);
2594 		}
2595 	}
2596 }
2597 
2598 static void cfi_intelext_resume(struct mtd_info *mtd)
2599 {
2600 	struct map_info *map = mtd->priv;
2601 	struct cfi_private *cfi = map->fldrv_priv;
2602 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2603 	int i;
2604 	struct flchip *chip;
2605 
2606 	for (i = 0; i < cfi->numchips; i++) {
2607 
2608 		chip = &cfi->chips[i];
2609 
2610 		mutex_lock(&chip->mutex);
2611 
2612 		/* Go to known state. Chip may have been power cycled */
2613 		if (chip->state == FL_PM_SUSPENDED) {
2614 			/* Refresh LH28F640BF Partition Config. Register */
2615 			fixup_LH28F640BF(mtd);
2616 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2617 			chip->oldstate = chip->state = FL_READY;
2618 			wake_up(&chip->wq);
2619 		}
2620 
2621 		mutex_unlock(&chip->mutex);
2622 	}
2623 
2624 	if ((mtd->flags & MTD_POWERUP_LOCK)
2625 	    && extp && (extp->FeatureSupport & (1 << 5)))
2626 		cfi_intelext_restore_locks(mtd);
2627 }
2628 
2629 static int cfi_intelext_reset(struct mtd_info *mtd)
2630 {
2631 	struct map_info *map = mtd->priv;
2632 	struct cfi_private *cfi = map->fldrv_priv;
2633 	int i, ret;
2634 
2635 	for (i = 0; i < cfi->numchips; i++) {
2636 		struct flchip *chip = &cfi->chips[i];
2637 
2638 		/* force the completion of any ongoing operation
2639 		   and switch to array mode so any bootloader in
2640 		   flash is accessible for soft reboot. */
2641 		mutex_lock(&chip->mutex);
2642 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2643 		if (!ret) {
2644 			map_write(map, CMD(0xff), chip->start);
2645 			chip->state = FL_SHUTDOWN;
2646 			put_chip(map, chip, chip->start);
2647 		}
2648 		mutex_unlock(&chip->mutex);
2649 	}
2650 
2651 	return 0;
2652 }
2653 
2654 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2655 			       void *v)
2656 {
2657 	struct mtd_info *mtd;
2658 
2659 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2660 	cfi_intelext_reset(mtd);
2661 	return NOTIFY_DONE;
2662 }
2663 
2664 static void cfi_intelext_destroy(struct mtd_info *mtd)
2665 {
2666 	struct map_info *map = mtd->priv;
2667 	struct cfi_private *cfi = map->fldrv_priv;
2668 	struct mtd_erase_region_info *region;
2669 	int i;
2670 	cfi_intelext_reset(mtd);
2671 	unregister_reboot_notifier(&mtd->reboot_notifier);
2672 	kfree(cfi->cmdset_priv);
2673 	kfree(cfi->cfiq);
2674 	kfree(cfi->chips[0].priv);
2675 	kfree(cfi);
2676 	for (i = 0; i < mtd->numeraseregions; i++) {
2677 		region = &mtd->eraseregions[i];
2678 		kfree(region->lockmap);
2679 	}
2680 	kfree(mtd->eraseregions);
2681 }
2682 
2683 MODULE_LICENSE("GPL");
2684 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2685 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2686 MODULE_ALIAS("cfi_cmdset_0003");
2687 MODULE_ALIAS("cfi_cmdset_0200");
2688