/*
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

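/* Per-chip driver state for one OneNAND device behind a GPMC chip-select */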
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	struct gpio_desc *int_gpiod;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	struct dma_chan *dma_chan;
};

static void omap2_onenand_dma_complete_func(void *completion)
{
	complete(completion);
}

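/*
 * The OneNAND INT pin is wired to a GPIO; the handler only signals
 * omap2_onenand_wait(), which sleeps on irq_done.
 */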
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

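/*
 * Program the OneNAND SYS_CFG1 register: burst length, burst read latency
 * and the synchronous read/write enables. The HF/VHF flags are derived
 * from the latency value.
 */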
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
				 bool sr, bool sw,
				 int latency, int burst_len)
{
	unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

	reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

	switch (burst_len) {
	case 0:		/* continuous */
		break;
	case 4:
		reg |= ONENAND_SYS_CFG1_BL_4;
		break;
	case 8:
		reg |= ONENAND_SYS_CFG1_BL_8;
		break;
	case 16:
		reg |= ONENAND_SYS_CFG1_BL_16;
		break;
	case 32:
		reg |= ONENAND_SYS_CFG1_BL_32;
		break;
	default:
		return -EINVAL;
	}

	if (latency > 5)
		reg |= ONENAND_SYS_CFG1_HF;
	if (latency > 7)
		reg |= ONENAND_SYS_CFG1_VHF;
	if (sr)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	if (sw)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

	write_reg(c, reg, ONENAND_REG_SYS_CFG1);

	return 0;
}

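/* Decode the frequency field (bits 7:4) of the OneNAND version ID into MHz */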
static int omap2_onenand_get_freq(int ver)
{
	switch ((ver >> 4) & 0xf) {
	case 0:
		return 40;
	case 1:
		return 54;
	case 2:
		return 66;
	case 3:
		return 83;
	case 4:
		return 104;
	}

	return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

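/*
 * Replacement for the generic onenand_wait(), installed only when the INT
 * GPIO is available. Reset and erase states are polled with short delays;
 * reads poll the interrupt register with a 20 ms timeout; all other states
 * sleep until the INT GPIO interrupt fires. Timeouts are retried up to
 * twice while the controller still reports an ongoing operation, then ECC
 * and controller status are checked.
 */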
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Add a delay to let GPIO settle */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

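/*
 * When the second BufferRAM bank is selected, skip over the first bank
 * (one page of DataRAM or one OOB area of SpareRAM).
 */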
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

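/*
 * Issue a single dmaengine memcpy from src to dst and wait up to 20 ms for
 * its completion callback.
 */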
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
					     dma_addr_t src, dma_addr_t dst,
					     size_t count)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&c->dma_done);

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &c->dma_done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	dma_async_issue_pending(c->dma_chan);

	if (!wait_for_completion_io_timeout(&c->dma_done,
					    msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(c->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

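/*
 * Copy from the chip's BufferRAM into memory. Large, aligned buffers are
 * transferred with the memcpy DMA channel; small, unaligned or
 * atomic-context requests (and DMA failures) fall back to a CPU copy. A
 * trailing chunk that is not a multiple of four bytes is always copied by
 * the CPU.
 */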
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;
	size_t xtra;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * If the buffer address is not DMA-able, the length is too short to
	 * make DMA transfers profitable, or panic_write() may be running in
	 * an interrupt context, fall back to PIO mode.
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || in_interrupt() || oops_in_progress)
		goto out_copy;

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	dma_src = c->phys_base + bram_offset;

	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

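/*
 * Mirror image of omap2_onenand_read_bufferram(): copy from memory into the
 * chip's BufferRAM, with the same DMA criteria and PIO fallback.
 */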
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * If the buffer address is not DMA-able, the length is too short to
	 * make DMA transfers profitable, or panic_write() may be running in
	 * an interrupt context, fall back to PIO mode.
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || in_interrupt() || oops_in_progress)
		goto out_copy;

	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

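/*
 * Probe: map the GPMC chip-select region, take the optional INT GPIO and a
 * memcpy-capable DMA channel, scan the chip, reprogram the GPMC timings for
 * the frequency reported by the chip, and register the MTD device.
 */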
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		r = PTR_ERR(c->int_gpiod);
		/* Just try again if this happens */
		if (r != -EPROBE_DEFER)
			dev_err(dev, "error getting gpio: %d\n", r);
		return r;
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 54:
			latency = 4;
			break;
		default:	/* 40 MHz or lower */
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);
	omap2_onenand_shutdown(pdev);

	return 0;
}

static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");