• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  drivers/block/mg_disk.c
3  *
4  *  Support for the mGine m[g]flash IO mode.
5  *  Based on legacy hd.c
6  *
7  * (c) 2008 mGine Co.,LTD
8  * (c) 2008 unsik Kim <donari75@gmail.com>
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License version 2 as
12  *  published by the Free Software Foundation.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/fs.h>
18 #include <linux/blkdev.h>
19 #include <linux/hdreg.h>
20 #include <linux/ata.h>
21 #include <linux/interrupt.h>
22 #include <linux/delay.h>
23 #include <linux/platform_device.h>
24 #include <linux/gpio.h>
25 #include <linux/mg_disk.h>
26 #include <linux/slab.h>
27 
28 #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
29 
30 /* name for block device */
31 #define MG_DISK_NAME "mgd"
32 
33 #define MG_DISK_MAJ 0
34 #define MG_DISK_MAX_PART 16
35 #define MG_SECTOR_SIZE 512
36 #define MG_MAX_SECTS 256
37 
38 /* Register offsets */
39 #define MG_BUFF_OFFSET			0x8000
40 #define MG_REG_OFFSET			0xC000
41 #define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
42 #define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
43 #define MG_REG_SECT_CNT			(MG_REG_OFFSET + 4)
44 #define MG_REG_SECT_NUM			(MG_REG_OFFSET + 6)
45 #define MG_REG_CYL_LOW			(MG_REG_OFFSET + 8)
46 #define MG_REG_CYL_HIGH			(MG_REG_OFFSET + 0xA)
47 #define MG_REG_DRV_HEAD			(MG_REG_OFFSET + 0xC)
48 #define MG_REG_COMMAND			(MG_REG_OFFSET + 0xE)	/* write case */
49 #define MG_REG_STATUS			(MG_REG_OFFSET + 0xE)	/* read  case */
50 #define MG_REG_DRV_CTRL			(MG_REG_OFFSET + 0x10)
51 #define MG_REG_BURST_CTRL		(MG_REG_OFFSET + 0x12)
52 
53 /* handy status */
54 #define MG_STAT_READY	(ATA_DRDY | ATA_DSC)
55 #define MG_READY_OK(s)	(((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
56 				 ATA_ERR))) == MG_STAT_READY)
57 
58 /* error code for others */
59 #define MG_ERR_NONE		0
60 #define MG_ERR_TIMEOUT		0x100
61 #define MG_ERR_INIT_STAT	0x101
62 #define MG_ERR_TRANSLATION	0x102
63 #define MG_ERR_CTRL_RST		0x103
64 #define MG_ERR_INV_STAT		0x104
65 #define MG_ERR_RSTOUT		0x105
66 
67 #define MG_MAX_ERRORS	6	/* Max read/write errors */
68 
69 /* command */
70 #define MG_CMD_RD 0x20
71 #define MG_CMD_WR 0x30
72 #define MG_CMD_SLEEP 0x99
73 #define MG_CMD_WAKEUP 0xC3
74 #define MG_CMD_ID 0xEC
75 #define MG_CMD_WR_CONF 0x3C
76 #define MG_CMD_RD_CONF 0x40
77 
78 /* operation mode */
79 #define MG_OP_CASCADE (1 << 0)
80 #define MG_OP_CASCADE_SYNC_RD (1 << 1)
81 #define MG_OP_CASCADE_SYNC_WR (1 << 2)
82 #define MG_OP_INTERLEAVE (1 << 3)
83 
84 /* synchronous */
85 #define MG_BURST_LAT_4 (3 << 4)
86 #define MG_BURST_LAT_5 (4 << 4)
87 #define MG_BURST_LAT_6 (5 << 4)
88 #define MG_BURST_LAT_7 (6 << 4)
89 #define MG_BURST_LAT_8 (7 << 4)
90 #define MG_BURST_LEN_4 (1 << 1)
91 #define MG_BURST_LEN_8 (2 << 1)
92 #define MG_BURST_LEN_16 (3 << 1)
93 #define MG_BURST_LEN_32 (4 << 1)
94 #define MG_BURST_LEN_CONT (0 << 1)
95 
96 /* timeout value (unit: ms) */
97 #define MG_TMAX_CONF_TO_CMD	1
98 #define MG_TMAX_WAIT_RD_DRQ	10
99 #define MG_TMAX_WAIT_WR_DRQ	500
100 #define MG_TMAX_RST_TO_BUSY	10
101 #define MG_TMAX_HDRST_TO_RDY	500
102 #define MG_TMAX_SWRST_TO_RDY	500
103 #define MG_TMAX_RSTOUT		3000
104 
105 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
106 
107 /* main structure for mflash driver */
struct mg_host {
	struct device *dev;

	struct request_queue *breq;	/* block request queue */
	struct request *req;		/* request currently being served */
	spinlock_t lock;		/* queue lock; also guards req/mg_do_intr */
	struct gendisk *gd;

	struct timer_list timer;	/* per-command completion watchdog */
	/* handler armed for the next IRQ; NULL when none is expected */
	void (*mg_do_intr) (struct mg_host *);

	u16 id[ATA_ID_WORDS];		/* raw ATA IDENTIFY data */

	u16 cyls;
	u16 heads;
	u16 sectors;
	u32 n_sectors;			/* usable capacity in sectors */
	u32 nres_sectors;		/* sectors reserved out of the capacity */

	void __iomem *dev_base;		/* remapped register/buffer window */
	unsigned int irq;
	unsigned int rst;		/* reset GPIO number */
	unsigned int rstout;		/* reset-out GPIO number */

	u32 major;			/* block device major number */
	u32 error;			/* last error code (MG_ERR_*) */
};
135 
136 /*
137  * Debugging macro and defines
138  */
139 #undef DO_MG_DEBUG
140 #ifdef DO_MG_DEBUG
141 #  define MG_DBG(fmt, args...) \
142 	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
143 #else /* CONFIG_MG_DEBUG */
144 #  define MG_DBG(fmt, args...) do { } while (0)
145 #endif /* CONFIG_MG_DEBUG */
146 
147 static void mg_request(struct request_queue *);
148 
mg_end_request(struct mg_host * host,int err,unsigned int nr_bytes)149 static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
150 {
151 	if (__blk_end_request(host->req, err, nr_bytes))
152 		return true;
153 
154 	host->req = NULL;
155 	return false;
156 }
157 
mg_end_request_cur(struct mg_host * host,int err)158 static bool mg_end_request_cur(struct mg_host *host, int err)
159 {
160 	return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
161 }
162 
/*
 * Decode an ATA status byte into the kernel log for diagnostics.
 * When the ERR bit is set, also reads the error register, decodes it,
 * and latches the raw value into host->error; otherwise clears
 * host->error.
 */
static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;

	/* prefer the disk name of the request being served, if any */
	if (host->req)
		name = host->req->rq_disk->disk_name;

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & ATA_BUSY)
		printk("Busy ");
	if (stat & ATA_DRDY)
		printk("DriveReady ");
	if (stat & ATA_DF)
		printk("WriteFault ");
	if (stat & ATA_DSC)
		printk("SeekComplete ");
	if (stat & ATA_DRQ)
		printk("DataRequest ");
	if (stat & ATA_CORR)
		printk("CorrectedError ");
	if (stat & ATA_ERR)
		printk("Error ");
	printk("}\n");
	if ((stat & ATA_ERR) == 0) {
		host->error = 0;
	} else {
		/* fetch and decode the error register */
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & ATA_BBK)
			printk("BadSector ");
		if (host->error & ATA_UNC)
			printk("UncorrectableError ");
		if (host->error & ATA_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & ATA_ABORTED)
			printk("DriveStatusError ");
		if (host->error & ATA_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		/* media-related errors: report the failing sector too */
		if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
			if (host->req)
				printk(", sector=%u",
				       (unsigned int)blk_rq_pos(host->req));
		}
		printk("\n");
	}
}
212 
/*
 * Poll the status register until the @expect condition holds or @msec
 * milliseconds elapse.  @expect is one of ATA_BUSY (wait for busy to
 * assert), MG_STAT_READY (ready + seek-complete with no fault/error)
 * or ATA_DRQ (data-request).  Returns MG_ERR_NONE on success or an
 * MG_ERR_* / latched error-register value, also left in host->error.
 * @msec == 0 means a single non-blocking check (MG_ERR_INV_STAT on
 * failure).
 */
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	/* These 2 times dummy status read prevents reading invalid
	 * status. A very little time (3 times of mflash operating clk)
	 * is required for busy bit is set. Use dummy read instead of
	 * busy wait, because mflash's PLL is machine dependent.
	 */
	if (prv_data->use_polling) {
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	}

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & ATA_BUSY) {
			if (expect == ATA_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & ATA_ERR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == ATA_DRQ)
				if (status & ATA_DRQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	/*
	 * NOTE(review): a successful break taken exactly after the
	 * deadline is still reported as MG_ERR_TIMEOUT here — confirm
	 * this edge is acceptable to callers.
	 */
	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}
267 
/*
 * Poll the reset-out GPIO until it reads high or @msec milliseconds
 * elapse.  Returns MG_ERR_NONE on success, MG_ERR_RSTOUT on timeout.
 */
static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(msec);

	while (time_before(jiffies, deadline)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		/* sample every 10 ms; this path may sleep */
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}
281 
mg_unexpected_intr(struct mg_host * host)282 static void mg_unexpected_intr(struct mg_host *host)
283 {
284 	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
285 
286 	mg_dump_status("mg_unexpected_intr", status, host);
287 }
288 
mg_irq(int irq,void * dev_id)289 static irqreturn_t mg_irq(int irq, void *dev_id)
290 {
291 	struct mg_host *host = dev_id;
292 	void (*handler)(struct mg_host *) = host->mg_do_intr;
293 
294 	spin_lock(&host->lock);
295 
296 	host->mg_do_intr = NULL;
297 	del_timer(&host->timer);
298 	if (!handler)
299 		handler = mg_unexpected_intr;
300 	handler(host);
301 
302 	spin_unlock(&host->lock);
303 
304 	return IRQ_HANDLED;
305 }
306 
307 /* local copy of ata_id_string() */
static void mg_id_string(const u16 *id, unsigned char *s,
			 unsigned int ofs, unsigned int len)
{
	/* IDENTIFY strings come in 16-bit words; @len must be even. */
	BUG_ON(len & 1);

	for (; len > 0; len -= 2, ofs++) {
		/* high byte of each word precedes the low byte */
		*s++ = id[ofs] >> 8;
		*s++ = id[ofs] & 0xff;
	}
}
328 
329 /* local copy of ata_id_c_string() */
static void mg_id_c_string(const u16 *id, unsigned char *s,
			   unsigned int ofs, unsigned int len)
{
	unsigned char *end;

	/* extract at most len-1 bytes, leaving room for the terminator */
	mg_id_string(id, s, ofs, len - 1);

	/* strip trailing spaces, then NUL-terminate */
	end = s + strnlen(s, len - 1);
	while (end > s && end[-1] == ' ')
		end--;
	*end = '\0';
}
342 
/*
 * Issue ATA IDENTIFY, cache the raw data in host->id, and derive the
 * geometry and usable capacity (with the reserved area subtracted).
 * Returns 0 on success or an MG_ERR_* code.
 */
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	/* mask the device interrupt while we poll for the data */
	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	/* pull the 512-byte IDENTIFY sector out of the device buffer */
	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	/* CHS fields are only meaningful if the device marks them valid */
	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
			host->n_sectors, host->nres_sectors);

	/* re-enable the device interrupt */
	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return err;
}
401 
402 
/*
 * Reset the device: hardware reset via the rst GPIO, then a soft reset
 * through the device-control register.  nIEN is kept asserted in
 * polling mode.  Returns 0 on success or an MG_ERR_* code.
 */
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? ATA_NIEN : 0,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	/* all four low status bits set indicates an invalid state */
	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}
442 
mg_bad_rw_intr(struct mg_host * host)443 static void mg_bad_rw_intr(struct mg_host *host)
444 {
445 	if (host->req)
446 		if (++host->req->errors >= MG_MAX_ERRORS ||
447 		    host->error == MG_ERR_TIMEOUT)
448 			mg_end_request_cur(host, -EIO);
449 }
450 
/*
 * Program a taskfile command for @sect_cnt sectors starting at LBA
 * @sect_num.  In interrupt mode, arms @intr_addr as the next IRQ
 * handler and starts a 3-second watchdog.  Returns MG_ERR_NONE or the
 * error latched by mg_wait().
 */
static unsigned int mg_out(struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt,
		unsigned int cmd,
		void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	/* skip past the reserved sectors at the start of the media */
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;
	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	/* LBA bits 24-27 plus the LBA-mode flag in the drive/head register */
	outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}
479 
mg_read_one(struct mg_host * host,struct request * req)480 static void mg_read_one(struct mg_host *host, struct request *req)
481 {
482 	u16 *buff = (u16 *)bio_data(req->bio);
483 	u32 i;
484 
485 	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
486 		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
487 			      (i << 1));
488 }
489 
mg_read(struct request * req)490 static void mg_read(struct request *req)
491 {
492 	struct mg_host *host = req->rq_disk->private_data;
493 
494 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
495 		   MG_CMD_RD, NULL) != MG_ERR_NONE)
496 		mg_bad_rw_intr(host);
497 
498 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
499 	       blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio));
500 
501 	do {
502 		if (mg_wait(host, ATA_DRQ,
503 			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
504 			mg_bad_rw_intr(host);
505 			return;
506 		}
507 
508 		mg_read_one(host, req);
509 
510 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
511 				MG_REG_COMMAND);
512 	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
513 }
514 
mg_write_one(struct mg_host * host,struct request * req)515 static void mg_write_one(struct mg_host *host, struct request *req)
516 {
517 	u16 *buff = (u16 *)bio_data(req->bio);
518 	u32 i;
519 
520 	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
521 		outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
522 		     (i << 1));
523 }
524 
/*
 * Serve a write request in polled mode: issue the command, wait for
 * the first DRQ, then stream sectors into the device buffer, each one
 * acknowledged with MG_CMD_WR_CONF.
 */
static void mg_write(struct request *req)
{
	struct mg_host *host = req->rq_disk->private_data;
	unsigned int rem = blk_rq_sectors(req);

	if (mg_out(host, blk_rq_pos(req), rem,
		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       rem, blk_rq_pos(req), bio_data(req->bio));

	/* the device must raise DRQ before the first sector is pushed */
	if (mg_wait(host, ATA_DRQ,
		    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	do {
		mg_write_one(host, req);

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);

		rem--;
		/*
		 * NOTE(review): when rem > 1 and the DRQ wait succeeds,
		 * control still falls into the else-if and performs the
		 * MG_STAT_READY wait as well — confirm this double wait
		 * is the intended handshake for middle sectors.
		 */
		if (rem > 1 && mg_wait(host, ATA_DRQ,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		} else if (mg_wait(host, MG_STAT_READY,
					MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
563 
/*
 * IRQ-mode read step: verify the status, drain one sector from the
 * device buffer, acknowledge it, then either re-arm this handler for
 * the next sector or move on to the next queued request.
 */
static void mg_read_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & ATA_DRQ)
			goto ok_to_read;
	} while (0);
	/* any break above means the transfer went bad */
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	mg_read_one(host, req);

	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
	       blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio));

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
		/* set handler if read remains */
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	} else /* goto next request */
		mg_request(host->breq);
}
600 
/*
 * IRQ-mode write step: verify the status, complete the sector just
 * written, push the next sector (re-arming this handler) if any
 * remain, then acknowledge with MG_CMD_WR_CONF.
 */
static void mg_write_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;
	bool rem;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		/* on the last sector no further DRQ is expected */
		if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
			goto ok_to_write;
	} while (0);
	/* any break above means the transfer went bad */
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	/* rem is only reached via this label; the error path returned above */
	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
		/* write 1 sector and set handler if remains */
		mg_write_one(host, req);
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
		       blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio));
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!rem)
		mg_request(host->breq);
}
638 
/*
 * Watchdog callback for IRQ-mode transfers: a command armed by
 * mg_out() did not complete in time.  Fails the in-flight request
 * (via mg_bad_rw_intr) and restarts queue processing.
 */
static void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;

	spin_lock_irq(&host->lock);

	if (!host->req)
		goto out_unlock;

	/* nothing will complete this command; drop the armed handler */
	host->mg_do_intr = NULL;

	name = host->req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

out_unlock:
	/* kick the queue again even if no request was pending */
	mg_request(host->breq);
	spin_unlock_irq(&host->lock);
}
661 
mg_request_poll(struct request_queue * q)662 static void mg_request_poll(struct request_queue *q)
663 {
664 	struct mg_host *host = q->queuedata;
665 
666 	while (1) {
667 		if (!host->req) {
668 			host->req = blk_fetch_request(q);
669 			if (!host->req)
670 				break;
671 		}
672 
673 		if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
674 			mg_end_request_cur(host, -EIO);
675 			continue;
676 		}
677 
678 		if (rq_data_dir(host->req) == READ)
679 			mg_read(host->req);
680 		else
681 			mg_write(host->req);
682 	}
683 }
684 
/*
 * Start one request in interrupt mode.  Reads are fully IRQ-driven;
 * writes prime the first sector by polling for DRQ (with the device
 * interrupt masked) and let mg_write_intr() handle the rest.
 * Returns MG_ERR_NONE once the command is in flight, or host->error.
 */
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		/* mask the device interrupt while polling for the first DRQ */
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		/* mg_out() armed the watchdog; hold it off while we poll */
		del_timer(&host->timer);
		mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
		/* unmask the device interrupt again */
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		mg_write_one(host, req);
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	}
	return MG_ERR_NONE;
}
721 
722 /* This function also called from IRQ context */
/*
 * Request-queue handler for interrupt mode: fetch requests, range
 * check them against the disk capacity, and hand them to
 * mg_issue_req().  Returns (leaving host->req set) once a transfer
 * is in flight; the IRQ handlers continue it from there.
 */
static void mg_request(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;
	struct request *req;
	u32 sect_num, sect_cnt;

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;
		}
		req = host->req;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = blk_rq_pos(req);
		/* deal whole segments */
		sect_cnt = blk_rq_sectors(req);

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
				((sect_num + sect_cnt) >
				 get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
					"%s: bad access: sector=%d, count=%d\n",
					req->rq_disk->disk_name,
					sect_num, sect_cnt);
			mg_end_request_cur(host, -EIO);
			continue;
		}

		/* only normal filesystem requests are supported */
		if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
			mg_end_request_cur(host, -EIO);
			continue;
		}

		/* zero return means the command is in flight; wait for IRQ */
		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}
768 
mg_getgeo(struct block_device * bdev,struct hd_geometry * geo)769 static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
770 {
771 	struct mg_host *host = bdev->bd_disk->private_data;
772 
773 	geo->cylinders = (unsigned short)host->cyls;
774 	geo->heads = (unsigned char)host->heads;
775 	geo->sectors = (unsigned char)host->sectors;
776 	return 0;
777 }
778 
/* block device operations: only geometry reporting is implemented */
static const struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};
782 
783 #ifdef CONFIG_PM_SLEEP
/*
 * PM suspend: wait for the device to go idle, then put the mflash
 * into deep sleep with MG_CMD_SLEEP.  In IRQ mode the device
 * interrupt is masked (nIEN) and stays masked until mg_resume().
 */
static int mg_suspend(struct device *dev)
{
	struct mg_drv_data *prv_data = dev->platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		/* sleep failed: undo the interrupt mask before bailing out */
		if (!prv_data->use_polling)
			outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}
807 
/*
 * PM resume: wake the mflash from deep sleep with MG_CMD_WAKEUP and,
 * in IRQ mode, unmask the device interrupt that mg_suspend() left
 * masked.
 */
static int mg_resume(struct device *dev)
{
	struct mg_drv_data *prv_data = dev->platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakeup */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	/* re-enable the device interrupt */
	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return 0;
}
828 #endif
829 
830 static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
831 
mg_probe(struct platform_device * plat_dev)832 static int mg_probe(struct platform_device *plat_dev)
833 {
834 	struct mg_host *host;
835 	struct resource *rsc;
836 	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
837 	int err = 0;
838 
839 	if (!prv_data) {
840 		printk(KERN_ERR	"%s:%d fail (no driver_data)\n",
841 				__func__, __LINE__);
842 		err = -EINVAL;
843 		goto probe_err;
844 	}
845 
846 	/* alloc mg_host */
847 	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
848 	if (!host) {
849 		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
850 				__func__, __LINE__);
851 		err = -ENOMEM;
852 		goto probe_err;
853 	}
854 	host->major = MG_DISK_MAJ;
855 
856 	/* link each other */
857 	prv_data->host = host;
858 	host->dev = &plat_dev->dev;
859 
860 	/* io remap */
861 	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
862 	if (!rsc) {
863 		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
864 				__func__, __LINE__);
865 		err = -EINVAL;
866 		goto probe_err_2;
867 	}
868 	host->dev_base = ioremap(rsc->start, resource_size(rsc));
869 	if (!host->dev_base) {
870 		printk(KERN_ERR "%s:%d ioremap fail\n",
871 				__func__, __LINE__);
872 		err = -EIO;
873 		goto probe_err_2;
874 	}
875 	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);
876 
877 	/* get reset pin */
878 	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
879 			MG_RST_PIN);
880 	if (!rsc) {
881 		printk(KERN_ERR "%s:%d get reset pin fail\n",
882 				__func__, __LINE__);
883 		err = -EIO;
884 		goto probe_err_3;
885 	}
886 	host->rst = rsc->start;
887 
888 	/* init rst pin */
889 	err = gpio_request(host->rst, MG_RST_PIN);
890 	if (err)
891 		goto probe_err_3;
892 	gpio_direction_output(host->rst, 1);
893 
894 	/* reset out pin */
895 	if (!(prv_data->dev_attr & MG_DEV_MASK)) {
896 		err = -EINVAL;
897 		goto probe_err_3a;
898 	}
899 
900 	if (prv_data->dev_attr != MG_BOOT_DEV) {
901 		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
902 				MG_RSTOUT_PIN);
903 		if (!rsc) {
904 			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
905 					__func__, __LINE__);
906 			err = -EIO;
907 			goto probe_err_3a;
908 		}
909 		host->rstout = rsc->start;
910 		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
911 		if (err)
912 			goto probe_err_3a;
913 		gpio_direction_input(host->rstout);
914 	}
915 
916 	/* disk reset */
917 	if (prv_data->dev_attr == MG_STORAGE_DEV) {
918 		/* If POR seq. not yet finished, wait */
919 		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
920 		if (err)
921 			goto probe_err_3b;
922 		err = mg_disk_init(host);
923 		if (err) {
924 			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
925 					__func__, __LINE__, err);
926 			err = -EIO;
927 			goto probe_err_3b;
928 		}
929 	}
930 
931 	/* get irq resource */
932 	if (!prv_data->use_polling) {
933 		host->irq = platform_get_irq(plat_dev, 0);
934 		if (host->irq == -ENXIO) {
935 			err = host->irq;
936 			goto probe_err_3b;
937 		}
938 		err = request_irq(host->irq, mg_irq,
939 				IRQF_TRIGGER_RISING,
940 				MG_DEV_NAME, host);
941 		if (err) {
942 			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
943 					__func__, __LINE__, err);
944 			goto probe_err_3b;
945 		}
946 
947 	}
948 
949 	/* get disk id */
950 	err = mg_get_disk_id(host);
951 	if (err) {
952 		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
953 				__func__, __LINE__, err);
954 		err = -EIO;
955 		goto probe_err_4;
956 	}
957 
958 	err = register_blkdev(host->major, MG_DISK_NAME);
959 	if (err < 0) {
960 		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
961 				__func__, __LINE__, err);
962 		goto probe_err_4;
963 	}
964 	if (!host->major)
965 		host->major = err;
966 
967 	spin_lock_init(&host->lock);
968 
969 	if (prv_data->use_polling)
970 		host->breq = blk_init_queue(mg_request_poll, &host->lock);
971 	else
972 		host->breq = blk_init_queue(mg_request, &host->lock);
973 
974 	if (!host->breq) {
975 		err = -ENOMEM;
976 		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
977 				__func__, __LINE__);
978 		goto probe_err_5;
979 	}
980 	host->breq->queuedata = host;
981 
982 	/* mflash is random device, thanx for the noop */
983 	err = elevator_change(host->breq, "noop");
984 	if (err) {
985 		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
986 				__func__, __LINE__);
987 		goto probe_err_6;
988 	}
989 	blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
990 	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
991 
992 	init_timer(&host->timer);
993 	host->timer.function = mg_times_out;
994 	host->timer.data = (unsigned long)host;
995 
996 	host->gd = alloc_disk(MG_DISK_MAX_PART);
997 	if (!host->gd) {
998 		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
999 				__func__, __LINE__);
1000 		err = -ENOMEM;
1001 		goto probe_err_7;
1002 	}
1003 	host->gd->major = host->major;
1004 	host->gd->first_minor = 0;
1005 	host->gd->fops = &mg_disk_ops;
1006 	host->gd->queue = host->breq;
1007 	host->gd->private_data = host;
1008 	sprintf(host->gd->disk_name, MG_DISK_NAME"a");
1009 
1010 	set_capacity(host->gd, host->n_sectors);
1011 
1012 	add_disk(host->gd);
1013 
1014 	return err;
1015 
1016 probe_err_7:
1017 	del_timer_sync(&host->timer);
1018 probe_err_6:
1019 	blk_cleanup_queue(host->breq);
1020 probe_err_5:
1021 	unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
1022 probe_err_4:
1023 	if (!prv_data->use_polling)
1024 		free_irq(host->irq, host);
1025 probe_err_3b:
1026 	gpio_free(host->rstout);
1027 probe_err_3a:
1028 	gpio_free(host->rst);
1029 probe_err_3:
1030 	iounmap(host->dev_base);
1031 probe_err_2:
1032 	kfree(host);
1033 probe_err:
1034 	return err;
1035 }
1036 
/*
 * Tear down everything mg_probe() set up, in reverse order of
 * acquisition.  The NULL/zero checks let this cope with a partially
 * initialized host.
 */
static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}
1079 
/* platform driver glue: bound by name, with suspend/resume PM hooks */
static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
		.pm = &mg_pm,
	}
};
1089 
1090 /****************************************************************************
1091  *
1092  * Module stuff
1093  *
1094  ****************************************************************************/
1095 
mg_init(void)1096 static int __init mg_init(void)
1097 {
1098 	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
1099 	return platform_driver_register(&mg_disk_driver);
1100 }
1101 
/* Module exit point: unregister the platform driver. */
static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}
1107 
1108 module_init(mg_init);
1109 module_exit(mg_exit);
1110 
1111 MODULE_LICENSE("GPL");
1112 MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
1113 MODULE_DESCRIPTION("mGine m[g]flash device driver");
1114