
/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif
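
/*
 * Example (not from the original header): with only CONFIG_MTD_CFI_I2 set,
 * cfi_interleave(cfi) expands to the constant 2, so
 *
 *	cfi_interleave_is_2(cfi)	-> (2 == 2)	-> always true
 *	cfi_interleave_is_1(cfi)	-> (0)		-> always false
 *
 * and the geometry maths below can be folded at compile time.  With two or
 * more CONFIG_MTD_CFI_Ix options set, the macro is redefined above to read
 * (cfi)->interleave at run time instead.
 */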

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 *     device type (x8, x16, x32).  E.g. a 32-bit device is 4 bytes wide
 *     (4 x 8 bits).  These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

struct cfi_intelext_programming_regioninfo {
	uint8_t  ProgRegShift;
	uint8_t  Reserved1;
	uint8_t  ControlValid;
	uint8_t  Reserved2;
	uint8_t  ControlInvalid;
	uint8_t  Reserved3;
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));

/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */

struct cfi_pri_atmel {
	uint8_t pri[3];
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001
#define P_ID_AMD_STD            0x0002
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				  must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
	return (cmd_ofs * type) * interleave;
}
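
/*
 * Worked example (illustrative, not part of the original header): for a
 * single x16 chip (type = CFI_DEVICETYPE_X16 = 2, interleave = 1), the
 * conventional unlock offset 0x555 becomes the bus byte address
 * (0x555 * 2) * 1 = 0xAAA.
 */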

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x)  cfi_build_cmd((x), map, cfi)
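
/*
 * Example (illustrative only): two interleaved x8 chips on a 16-bit bus give
 * chip_mode = 1 and chips_per_word = 2, so CMD(0x98) replicates the byte
 * across both chip lanes and puts 0x9898 on the bus.
 */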


static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
	case 2:
		res |= (onestat >> (chip_mode * 8));
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}
	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
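
/*
 * Example (illustrative only): with two interleaved x8 chips returning status
 * bytes 0x20 and 0x80, map_read() yields 0x2080 (or 0x8020, depending on
 * which chip sits in which lane) and the shifts above fold the lanes
 * together, so the low byte of the result is 0x20 | 0x80 = 0xA0, i.e. a
 * status bit set by any one chip is visible in the merged value.
 */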


/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
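
/*
 * Usage sketch (illustrative, loosely following the AMD/Fujitsu command-set
 * driver): a typical unlock/program preamble is issued as
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *
 * with each call scaling the unlock offset for the chip geometry before
 * writing the replicated command word.
 */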

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0] & 0xff;
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
			     const char* name);
struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;
};

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY  0xffff

#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_ST  0x0020	/* STMicroelectronics */

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
	loff_t ofs, size_t len, void *thunk);


#endif /* __MTD_CFI_H__ */