1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
4  *
5  * (C) 2001 San Mehat <nettwerk@valinux.com>
6  * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
7  * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
8  *
9  * This driver for the Micro Memory PCI Memory Module with Battery Backup
10  * is Copyright Micro Memory Inc 2001-2002.  All rights reserved.
11  *
12  * This driver provides a standard block device interface for Micro Memory(tm)
13  * PCI based RAM boards.
14  * 10/05/01: Phap Nguyen - Rebuilt the driver
15  * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning
16  * 29oct2001:NeilBrown   - Use make_request_fn instead of request_fn
17  *                       - use standard disk partitioning (so fdisk works).
18  * 08nov2001:NeilBrown	 - change driver name from "mm" to "umem"
19  *			 - incorporate into main kernel
20  * 08apr2002:NeilBrown   - Move some of interrupt handling to tasklet
21  *			 - use spin_lock_bh instead of _irq
22  *			 - Never block on make_request.  queue
23  *			   bh's instead.
24  *			 - unregister umem from devfs at mod unload
25  *			 - Change version to 2.3
26  * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal)
27  * 07Jan2002: P. Nguyen  - Used PCI Memory Write & Invalidate for DMA
28  * 15May2002:NeilBrown   - convert to bio for 2.5
29  * 17May2002:NeilBrown   - remove init_mem initialisation.  Instead detect
30  *			 - a sequence of writes that cover the card, and
31  *			 - set initialised bit then.
32  */
33 
34 #undef DEBUG	/* #define DEBUG if you want debugging info (pr_debug) */
35 #include <linux/fs.h>
36 #include <linux/bio.h>
37 #include <linux/kernel.h>
38 #include <linux/mm.h>
39 #include <linux/mman.h>
40 #include <linux/gfp.h>
41 #include <linux/ioctl.h>
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/timer.h>
46 #include <linux/pci.h>
47 #include <linux/dma-mapping.h>
48 
49 #include <linux/fcntl.h>        /* O_ACCMODE */
50 #include <linux/hdreg.h>  /* HDIO_GETGEO */
51 
52 #include "umem.h"
53 
54 #include <linux/uaccess.h>
55 #include <asm/io.h>
56 
57 #define MM_MAXCARDS 4
58 #define MM_RAHEAD 2      /* two sectors */
59 #define MM_BLKSIZE 1024  /* 1k blocks */
60 #define MM_HARDSECT 512  /* 512-byte hardware sectors */
61 #define MM_SHIFT 6       /* max 64 partitions on 4 cards  */
62 
63 /*
64  * Version Information
65  */
66 
67 #define DRIVER_NAME	"umem"
68 #define DRIVER_VERSION	"v2.3"
69 #define DRIVER_AUTHOR	"San Mehat, Johannes Erdfelt, NeilBrown"
70 #define DRIVER_DESC	"Micro Memory(tm) PCI memory board block driver"
71 
72 static int debug;
73 /* #define HW_TRACE(x)     writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
74 #define HW_TRACE(x)
75 
76 #define DEBUG_LED_ON_TRANSFER	0x01
77 #define DEBUG_BATTERY_POLLING	0x02
78 
79 module_param(debug, int, 0644);
80 MODULE_PARM_DESC(debug, "Debug bitmask");
81 
82 static int pci_read_cmd = 0x0C;		/* Read Multiple */
83 module_param(pci_read_cmd, int, 0);
84 MODULE_PARM_DESC(pci_read_cmd, "PCI read command");
85 
86 static int pci_write_cmd = 0x0F;	/* Write and Invalidate */
87 module_param(pci_write_cmd, int, 0);
88 MODULE_PARM_DESC(pci_write_cmd, "PCI write command");
89 
90 static int pci_cmds;
91 
92 static int major_nr;
93 
94 #include <linux/blkdev.h>
95 #include <linux/blkpg.h>
96 
97 struct cardinfo {
98 	struct pci_dev	*dev;
99 
100 	unsigned char	__iomem *csr_remap;
101 	unsigned int	mm_size;  /* size in kbytes */
102 
103 	unsigned int	init_size; /* initial segment, in sectors,
104 				    * that we know to
105 				    * have been written
106 				    */
107 	struct bio	*bio, *currentbio, **biotail;
108 	struct bvec_iter current_iter;
109 
110 	struct request_queue *queue;
111 
112 	struct mm_page {
113 		dma_addr_t		page_dma;
114 		struct mm_dma_desc	*desc;
115 		int	 		cnt, headcnt;
116 		struct bio		*bio, **biotail;
117 		struct bvec_iter	iter;
118 	} mm_pages[2];
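/* number of mm_dma_desc entries that fit in one two-page allocation */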
119 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
120 
121 	int  Active, Ready;
122 
123 	struct tasklet_struct	tasklet;
124 	unsigned int dma_status;
125 
126 	struct {
127 		int		good;
128 		int		warned;
129 		unsigned long	last_change;
130 	} battery[2];
131 
132 	spinlock_t 	lock;
133 	int		check_batteries;
134 
135 	int		flags;
136 };
137 
138 static struct cardinfo cards[MM_MAXCARDS];
139 static struct timer_list battery_timer;
140 
141 static int num_cards;
142 
143 static struct gendisk *mm_gendisk[MM_MAXCARDS];
144 
145 static void check_batteries(struct cardinfo *card);
146 
147 static int get_userbit(struct cardinfo *card, int bit)
148 {
149 	unsigned char led;
150 
151 	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
152 	return led & bit;
153 }
154 
155 static int set_userbit(struct cardinfo *card, int bit, unsigned char state)
156 {
157 	unsigned char led;
158 
159 	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
160 	if (state)
161 		led |= bit;
162 	else
163 		led &= ~bit;
164 	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
165 
166 	return 0;
167 }
168 
169 /*
170  * NOTE: For the power LED, use the LED_POWER_* macros since they differ
171  */
172 static void set_led(struct cardinfo *card, int shift, unsigned char state)
173 {
174 	unsigned char led;
175 
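	/* each LED is a two-bit field in the LED control register */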
176 	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
177 	if (state == LED_FLIP)
178 		led ^= (1<<shift);
179 	else {
180 		led &= ~(0x03 << shift);
181 		led |= (state << shift);
182 	}
183 	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
184 
185 }
186 
187 #ifdef MM_DIAG
188 static void dump_regs(struct cardinfo *card)
189 {
190 	unsigned char *p;
191 	int i, i1;
192 
193 	p = card->csr_remap;
194 	for (i = 0; i < 8; i++) {
195 		printk(KERN_DEBUG "%p   ", p);
196 
197 		for (i1 = 0; i1 < 16; i1++)
198 			printk("%02x ", *p++);
199 
200 		printk("\n");
201 	}
202 }
203 #endif
204 
205 static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
206 {
207 	dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
208 	if (dmastat & DMASCR_ANY_ERR)
209 		printk(KERN_CONT "ANY_ERR ");
210 	if (dmastat & DMASCR_MBE_ERR)
211 		printk(KERN_CONT "MBE_ERR ");
212 	if (dmastat & DMASCR_PARITY_ERR_REP)
213 		printk(KERN_CONT "PARITY_ERR_REP ");
214 	if (dmastat & DMASCR_PARITY_ERR_DET)
215 		printk(KERN_CONT "PARITY_ERR_DET ");
216 	if (dmastat & DMASCR_SYSTEM_ERR_SIG)
217 		printk(KERN_CONT "SYSTEM_ERR_SIG ");
218 	if (dmastat & DMASCR_TARGET_ABT)
219 		printk(KERN_CONT "TARGET_ABT ");
220 	if (dmastat & DMASCR_MASTER_ABT)
221 		printk(KERN_CONT "MASTER_ABT ");
222 	if (dmastat & DMASCR_CHAIN_COMPLETE)
223 		printk(KERN_CONT "CHAIN_COMPLETE ");
224 	if (dmastat & DMASCR_DMA_COMPLETE)
225 		printk(KERN_CONT "DMA_COMPLETE ");
226 	printk("\n");
227 }
228 
229 /*
230  * Theory of request handling
231  *
232  * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
233  * We have two pages of mm_dma_desc, holding about 64 descriptors
234  * each.  These are allocated at init time.
235  * One page is "Ready" and is either full, or can have requests added.
236  * The other page might be "Active", with DMA happening on it.
237  *
238  * Whenever IO on the active page completes, the Ready page is activated
239  * and the ex-Active page is cleaned out and made Ready.
240  * Otherwise the Ready page is only activated when it becomes full.
241  *
242  * If a request arrives while both pages are full, it is queued, and b_rdev is
243  * overloaded to record whether it was a read or a write.
244  *
245  * The interrupt handler only polls the device to clear the interrupt.
246  * The processing of the result is done in a tasklet.
247  */
248 
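/*
 * Rough life cycle of the two descriptor pages:
 *   add_bio()      - appends a descriptor for the next bio_vec to the
 *                    Ready page
 *   activate()     - if no page is Active, promotes Ready to Active and
 *                    calls mm_start_io()
 *   mm_interrupt() - latches the DMA status and schedules the tasklet
 *   process_page() - retires completed descriptors on the Active page,
 *                    then swaps the page roles via activate()
 */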
249 static void mm_start_io(struct cardinfo *card)
250 {
251 	/* we have the lock, we know there is
252 	 * no IO active, and we know that card->Active
253 	 * is set
254 	 */
255 	struct mm_dma_desc *desc;
256 	struct mm_page *page;
257 	int offset;
258 
259 	/* make the last descriptor end the chain */
260 	page = &card->mm_pages[card->Active];
261 	pr_debug("start_io: %d %d->%d\n",
262 		card->Active, page->headcnt, page->cnt - 1);
263 	desc = &page->desc[page->cnt-1];
264 
265 	desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
266 	desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
267 	desc->sem_control_bits = desc->control_bits;
268 
269 
270 	if (debug & DEBUG_LED_ON_TRANSFER)
271 		set_led(card, LED_REMOVE, LED_ON);
272 
273 	desc = &page->desc[page->headcnt];
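	/* Zero the per-transfer registers; with DMASCR_CHAIN_EN set the
	 * engine picks up PCI/local addresses, transfer size and semaphore
	 * address from each descriptor in the chain.
	 */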
274 	writel(0, card->csr_remap + DMA_PCI_ADDR);
275 	writel(0, card->csr_remap + DMA_PCI_ADDR + 4);
276 
277 	writel(0, card->csr_remap + DMA_LOCAL_ADDR);
278 	writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4);
279 
280 	writel(0, card->csr_remap + DMA_TRANSFER_SIZE);
281 	writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4);
282 
283 	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR);
284 	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4);
285 
286 	offset = ((char *)desc) - ((char *)page->desc);
287 	writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff),
288 	       card->csr_remap + DMA_DESCRIPTOR_ADDR);
289 	/* Force the value to u64 before shifting otherwise >> 32 is undefined C
290 	 * and on some ports will do nothing ! */
291 	writel(cpu_to_le32(((u64)page->page_dma)>>32),
292 	       card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);
293 
294 	/* Go, go, go */
295 	writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds),
296 	       card->csr_remap + DMA_STATUS_CTRL);
297 }
298 
299 static int add_bio(struct cardinfo *card);
300 
301 static void activate(struct cardinfo *card)
302 {
303 	/* if no page is Active, and Ready is
304 	 * not empty, then switch the Ready page
305 	 * to Active and start IO.
306 	 * Then add any bios that are available to Ready
307 	 */
308 
309 	do {
310 		while (add_bio(card))
311 			;
312 
313 		if (card->Active == -1 &&
314 		    card->mm_pages[card->Ready].cnt > 0) {
315 			card->Active = card->Ready;
316 			card->Ready = 1-card->Ready;
317 			mm_start_io(card);
318 		}
319 
320 	} while (card->Active == -1 && add_bio(card));
321 }
322 
323 static inline void reset_page(struct mm_page *page)
324 {
325 	page->cnt = 0;
326 	page->headcnt = 0;
327 	page->bio = NULL;
328 	page->biotail = &page->bio;
329 }
330 
331 /*
332  * If there is room on the Ready page, take
333  * one bio off the list and add it.
334  * Return 1 if there was room, else 0.
335  */
336 static int add_bio(struct cardinfo *card)
337 {
338 	struct mm_page *p;
339 	struct mm_dma_desc *desc;
340 	dma_addr_t dma_handle;
341 	int offset;
342 	struct bio *bio;
343 	struct bio_vec vec;
344 
345 	bio = card->currentbio;
346 	if (!bio && card->bio) {
347 		card->currentbio = card->bio;
348 		card->current_iter = card->bio->bi_iter;
349 		card->bio = card->bio->bi_next;
350 		if (card->bio == NULL)
351 			card->biotail = &card->bio;
352 		card->currentbio->bi_next = NULL;
353 		return 1;
354 	}
355 	if (!bio)
356 		return 0;
357 
358 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
359 		return 0;
360 
361 	vec = bio_iter_iovec(bio, card->current_iter);
362 
363 	dma_handle = dma_map_page(&card->dev->dev,
364 				  vec.bv_page,
365 				  vec.bv_offset,
366 				  vec.bv_len,
367 				  bio_op(bio) == REQ_OP_READ ?
368 				  DMA_FROM_DEVICE : DMA_TO_DEVICE);
369 
370 	p = &card->mm_pages[card->Ready];
371 	desc = &p->desc[p->cnt];
372 	p->cnt++;
373 	if (p->bio == NULL)
374 		p->iter = card->current_iter;
375 	if ((p->biotail) != &bio->bi_next) {
376 		*(p->biotail) = bio;
377 		p->biotail = &(bio->bi_next);
378 		bio->bi_next = NULL;
379 	}
380 
381 	desc->data_dma_handle = dma_handle;
382 
383 	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
384 	desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
385 	desc->transfer_size = cpu_to_le32(vec.bv_len);
386 	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
387 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
388 	desc->zero1 = desc->zero2 = 0;
389 	offset = (((char *)(desc+1)) - ((char *)p->desc));
390 	desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
391 	desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN|
392 					 DMASCR_PARITY_INT_EN|
393 					 DMASCR_CHAIN_EN |
394 					 DMASCR_SEM_EN |
395 					 pci_cmds);
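	/* For a block-layer WRITE the engine reads from host memory (the
	 * page was mapped DMA_TO_DEVICE above), hence DMASCR_TRANSFER_READ.
	 */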
396 	if (bio_op(bio) == REQ_OP_WRITE)
397 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
398 	desc->sem_control_bits = desc->control_bits;
399 
400 
401 	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
402 	if (!card->current_iter.bi_size)
403 		card->currentbio = NULL;
404 
405 	return 1;
406 }
407 
408 static void process_page(unsigned long data)
409 {
410 	/* check if any of the requests in the page are DMA_COMPLETE,
411 	 * and deal with them appropriately.
412 	 * If we find a descriptor without DMA_COMPLETE in the semaphore, then
413 	 * dma must have hit an error on that descriptor, so use dma_status
414 	 * instead and assume that all following descriptors must be re-tried.
415 	 */
416 	struct mm_page *page;
417 	struct bio *return_bio = NULL;
418 	struct cardinfo *card = (struct cardinfo *)data;
419 	unsigned int dma_status = card->dma_status;
420 
421 	spin_lock(&card->lock);
422 	if (card->Active < 0)
423 		goto out_unlock;
424 	page = &card->mm_pages[card->Active];
425 
426 	while (page->headcnt < page->cnt) {
427 		struct bio *bio = page->bio;
428 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
429 		int control = le32_to_cpu(desc->sem_control_bits);
430 		int last = 0;
431 		struct bio_vec vec;
432 
433 		if (!(control & DMASCR_DMA_COMPLETE)) {
434 			control = dma_status;
435 			last = 1;
436 		}
437 
438 		page->headcnt++;
439 		vec = bio_iter_iovec(bio, page->iter);
440 		bio_advance_iter(bio, &page->iter, vec.bv_len);
441 
442 		if (!page->iter.bi_size) {
443 			page->bio = bio->bi_next;
444 			if (page->bio)
445 				page->iter = page->bio->bi_iter;
446 		}
447 
448 		dma_unmap_page(&card->dev->dev, desc->data_dma_handle,
449 			       vec.bv_len,
450 				 (control & DMASCR_TRANSFER_READ) ?
451 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
452 		if (control & DMASCR_HARD_ERROR) {
453 			/* error */
454 			bio->bi_status = BLK_STS_IOERR;
455 			dev_printk(KERN_WARNING, &card->dev->dev,
456 				"I/O error on sector %d/%d\n",
457 				le32_to_cpu(desc->local_addr)>>9,
458 				le32_to_cpu(desc->transfer_size));
459 			dump_dmastat(card, control);
460 		} else if (op_is_write(bio_op(bio)) &&
461 			   le32_to_cpu(desc->local_addr) >> 9 ==
462 				card->init_size) {
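			/* A write that starts exactly at the initialised
			 * prefix extends init_size (in sectors); once it
			 * covers the whole card (mm_size is in kbytes,
			 * hence the >> 1) mark the memory as initialised.
			 */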
463 			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
464 			if (card->init_size >> 1 >= card->mm_size) {
465 				dev_printk(KERN_INFO, &card->dev->dev,
466 					"memory now initialised\n");
467 				set_userbit(card, MEMORY_INITIALIZED, 1);
468 			}
469 		}
470 		if (bio != page->bio) {
471 			bio->bi_next = return_bio;
472 			return_bio = bio;
473 		}
474 
475 		if (last)
476 			break;
477 	}
478 
479 	if (debug & DEBUG_LED_ON_TRANSFER)
480 		set_led(card, LED_REMOVE, LED_OFF);
481 
482 	if (card->check_batteries) {
483 		card->check_batteries = 0;
484 		check_batteries(card);
485 	}
486 	if (page->headcnt >= page->cnt) {
487 		reset_page(page);
488 		card->Active = -1;
489 		activate(card);
490 	} else {
491 		/* haven't finished with this one yet */
492 		pr_debug("do some more\n");
493 		mm_start_io(card);
494 	}
495  out_unlock:
496 	spin_unlock(&card->lock);
497 
498 	while (return_bio) {
499 		struct bio *bio = return_bio;
500 
501 		return_bio = bio->bi_next;
502 		bio->bi_next = NULL;
503 		bio_endio(bio);
504 	}
505 }
506 
507 static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
508 {
509 	struct cardinfo *card = cb->data;
510 
511 	spin_lock_irq(&card->lock);
512 	activate(card);
513 	spin_unlock_irq(&card->lock);
514 	kfree(cb);
515 }
516 
517 static int mm_check_plugged(struct cardinfo *card)
518 {
519 	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
520 }
521 
522 static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
523 {
524 	struct cardinfo *card = q->queuedata;
525 	pr_debug("mm_make_request %llu %u\n",
526 		 (unsigned long long)bio->bi_iter.bi_sector,
527 		 bio->bi_iter.bi_size);
528 
529 	blk_queue_split(q, &bio);
530 
531 	spin_lock_irq(&card->lock);
532 	*card->biotail = bio;
533 	bio->bi_next = NULL;
534 	card->biotail = &bio->bi_next;
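	/* Start I/O immediately for sync bios or when no plug is active;
	 * otherwise mm_unplug() calls activate() when the plug is flushed.
	 */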
535 	if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
536 		activate(card);
537 	spin_unlock_irq(&card->lock);
538 
539 	return BLK_QC_T_NONE;
540 }
541 
542 static irqreturn_t mm_interrupt(int irq, void *__card)
543 {
544 	struct cardinfo *card = (struct cardinfo *) __card;
545 	unsigned int dma_status;
546 	unsigned short cfg_status;
547 
548 HW_TRACE(0x30);
549 
550 	dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL));
551 
552 	if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) {
553 		/* interrupt wasn't for me ... */
554 		return IRQ_NONE;
555 	}
556 
557 	/* clear COMPLETION interrupts */
558 	if (card->flags & UM_FLAG_NO_BYTE_STATUS)
559 		writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE),
560 		       card->csr_remap + DMA_STATUS_CTRL);
561 	else
562 		writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
563 		       card->csr_remap + DMA_STATUS_CTRL + 2);
564 
565 	/* log errors and clear interrupt status */
566 	if (dma_status & DMASCR_ANY_ERR) {
567 		unsigned int	data_log1, data_log2;
568 		unsigned int	addr_log1, addr_log2;
569 		unsigned char	stat, count, syndrome, check;
570 
571 		stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS);
572 
573 		data_log1 = le32_to_cpu(readl(card->csr_remap +
574 						ERROR_DATA_LOG));
575 		data_log2 = le32_to_cpu(readl(card->csr_remap +
576 						ERROR_DATA_LOG + 4));
577 		addr_log1 = le32_to_cpu(readl(card->csr_remap +
578 						ERROR_ADDR_LOG));
579 		addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4);
580 
581 		count = readb(card->csr_remap + ERROR_COUNT);
582 		syndrome = readb(card->csr_remap + ERROR_SYNDROME);
583 		check = readb(card->csr_remap + ERROR_CHECK);
584 
585 		dump_dmastat(card, dma_status);
586 
587 		if (stat & 0x01)
588 			dev_printk(KERN_ERR, &card->dev->dev,
589 				"Memory access error detected (err count %d)\n",
590 				count);
591 		if (stat & 0x02)
592 			dev_printk(KERN_ERR, &card->dev->dev,
593 				"Multi-bit EDC error\n");
594 
595 		dev_printk(KERN_ERR, &card->dev->dev,
596 			"Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
597 			addr_log2, addr_log1, data_log2, data_log1);
598 		dev_printk(KERN_ERR, &card->dev->dev,
599 			"Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
600 			check, syndrome);
601 
602 		writeb(0, card->csr_remap + ERROR_COUNT);
603 	}
604 
605 	if (dma_status & DMASCR_PARITY_ERR_REP) {
606 		dev_printk(KERN_ERR, &card->dev->dev,
607 			"PARITY ERROR REPORTED\n");
608 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
609 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
610 	}
611 
612 	if (dma_status & DMASCR_PARITY_ERR_DET) {
613 		dev_printk(KERN_ERR, &card->dev->dev,
614 			"PARITY ERROR DETECTED\n");
615 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
616 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
617 	}
618 
619 	if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
620 		dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
621 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
622 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
623 	}
624 
625 	if (dma_status & DMASCR_TARGET_ABT) {
626 		dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
627 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
628 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
629 	}
630 
631 	if (dma_status & DMASCR_MASTER_ABT) {
632 		dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
633 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
634 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
635 	}
636 
637 	/* and process the DMA descriptors */
638 	card->dma_status = dma_status;
639 	tasklet_schedule(&card->tasklet);
640 
641 HW_TRACE(0x36);
642 
643 	return IRQ_HANDLED;
644 }
645 
646 /*
647  * If both batteries are good, no LED
648  * If either battery has been warned, solid LED
649  * If both batteries are bad, flash the LED quickly
650  * If either battery is bad, flash the LED semi quickly
651  */
652 static void set_fault_to_battery_status(struct cardinfo *card)
653 {
654 	if (card->battery[0].good && card->battery[1].good)
655 		set_led(card, LED_FAULT, LED_OFF);
656 	else if (card->battery[0].warned || card->battery[1].warned)
657 		set_led(card, LED_FAULT, LED_ON);
658 	else if (!card->battery[0].good && !card->battery[1].good)
659 		set_led(card, LED_FAULT, LED_FLASH_7_0);
660 	else
661 		set_led(card, LED_FAULT, LED_FLASH_3_5);
662 }
663 
664 static void init_battery_timer(void);
665 
666 static int check_battery(struct cardinfo *card, int battery, int status)
667 {
668 	if (status != card->battery[battery].good) {
669 		card->battery[battery].good = !card->battery[battery].good;
670 		card->battery[battery].last_change = jiffies;
671 
672 		if (card->battery[battery].good) {
673 			dev_printk(KERN_ERR, &card->dev->dev,
674 				"Battery %d now good\n", battery + 1);
675 			card->battery[battery].warned = 0;
676 		} else
677 			dev_printk(KERN_ERR, &card->dev->dev,
678 				"Battery %d now FAILED\n", battery + 1);
679 
680 		return 1;
681 	} else if (!card->battery[battery].good &&
682 		   !card->battery[battery].warned &&
683 		   time_after_eq(jiffies, card->battery[battery].last_change +
684 				 (HZ * 60 * 60 * 5))) {
685 		dev_printk(KERN_ERR, &card->dev->dev,
686 			"Battery %d still FAILED after 5 hours\n", battery + 1);
687 		card->battery[battery].warned = 1;
688 
689 		return 1;
690 	}
691 
692 	return 0;
693 }
694 
695 static void check_batteries(struct cardinfo *card)
696 {
697 	/* NOTE: this must *never* be called while the card
698 	 * is doing (bus-to-card) DMA, or you will need the
699 	 * reset switch
700 	 */
701 	unsigned char status;
702 	int ret1, ret2;
703 
704 	status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
705 	if (debug & DEBUG_BATTERY_POLLING)
706 		dev_printk(KERN_DEBUG, &card->dev->dev,
707 			"checking battery status, 1 = %s, 2 = %s\n",
708 		       (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
709 		       (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
710 
711 	ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
712 	ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));
713 
714 	if (ret1 || ret2)
715 		set_fault_to_battery_status(card);
716 }
717 
718 static void check_all_batteries(struct timer_list *unused)
719 {
720 	int i;
721 
722 	for (i = 0; i < num_cards; i++)
723 		if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
724 			struct cardinfo *card = &cards[i];
725 			spin_lock_bh(&card->lock);
726 			if (card->Active >= 0)
727 				card->check_batteries = 1;
728 			else
729 				check_batteries(card);
730 			spin_unlock_bh(&card->lock);
731 		}
732 
733 	init_battery_timer();
734 }
735 
736 static void init_battery_timer(void)
737 {
738 	timer_setup(&battery_timer, check_all_batteries, 0);
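	/* poll the batteries once a minute */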
739 	battery_timer.expires = jiffies + (HZ * 60);
740 	add_timer(&battery_timer);
741 }
742 
743 static void del_battery_timer(void)
744 {
745 	del_timer(&battery_timer);
746 }
747 
748 /*
749  * Note no locks taken out here.  In a worst case scenario, we could drop
750  * a chunk of system memory.  But that should never happen, since validation
751  * happens at open or mount time, when locks are held.
752  *
753  *	That's crap, since doing that while some partitions are opened
754  * or mounted will give you really nasty results.
755  */
756 static int mm_revalidate(struct gendisk *disk)
757 {
758 	struct cardinfo *card = disk->private_data;
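	/* mm_size is in kbytes, set_capacity() wants 512-byte sectors */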
759 	set_capacity(disk, card->mm_size << 1);
760 	return 0;
761 }
762 
763 static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
764 {
765 	struct cardinfo *card = bdev->bd_disk->private_data;
766 	int size = card->mm_size * (1024 / MM_HARDSECT);
767 
768 	/*
769 	 * get geometry: we have to fake one...  trim the size to a
770 	 * multiple of 2048 (1M): claim we have 32 sectors, 64 heads,
771 	 * and however many cylinders that gives.
772 	 */
773 	geo->heads     = 64;
774 	geo->sectors   = 32;
775 	geo->cylinders = size / (geo->heads * geo->sectors);
776 	return 0;
777 }
778 
779 static const struct block_device_operations mm_fops = {
780 	.owner		= THIS_MODULE,
781 	.getgeo		= mm_getgeo,
782 	.revalidate_disk = mm_revalidate,
783 };
784 
785 static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
786 {
787 	int ret = -ENODEV;
788 	struct cardinfo *card = &cards[num_cards];
789 	unsigned char	mem_present;
790 	unsigned char	batt_status;
791 	unsigned int	saved_bar, data;
792 	unsigned long	csr_base;
793 	unsigned long	csr_len;
794 	int		magic_number;
795 	static int	printed_version;
796 
797 	if (!printed_version++)
798 		printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
799 
800 	ret = pci_enable_device(dev);
801 	if (ret)
802 		return ret;
803 
804 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
805 	pci_set_master(dev);
806 
807 	card->dev         = dev;
808 
809 	csr_base = pci_resource_start(dev, 0);
810 	csr_len  = pci_resource_len(dev, 0);
811 	if (!csr_base || !csr_len)
812 		return -ENODEV;
813 
814 	dev_printk(KERN_INFO, &dev->dev,
815 	  "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
816 
817 	if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) &&
818 	    dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
819 		dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
820 		return  -ENOMEM;
821 	}
822 
823 	ret = pci_request_regions(dev, DRIVER_NAME);
824 	if (ret) {
825 		dev_printk(KERN_ERR, &card->dev->dev,
826 			"Unable to request memory region\n");
827 		goto failed_req_csr;
828 	}
829 
830 	card->csr_remap = ioremap_nocache(csr_base, csr_len);
831 	if (!card->csr_remap) {
832 		dev_printk(KERN_ERR, &card->dev->dev,
833 			"Unable to remap memory region\n");
834 		ret = -ENOMEM;
835 
836 		goto failed_remap_csr;
837 	}
838 
839 	dev_printk(KERN_INFO, &card->dev->dev,
840 		"CSR 0x%08lx -> 0x%p (0x%lx)\n",
841 	       csr_base, card->csr_remap, csr_len);
842 
843 	switch (card->dev->device) {
844 	case 0x5415:
845 		card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
846 		magic_number = 0x59;
847 		break;
848 
849 	case 0x5425:
850 		card->flags |= UM_FLAG_NO_BYTE_STATUS;
851 		magic_number = 0x5C;
852 		break;
853 
854 	case 0x6155:
855 		card->flags |= UM_FLAG_NO_BYTE_STATUS |
856 				UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT;
857 		magic_number = 0x99;
858 		break;
859 
860 	default:
861 		magic_number = 0x100;
862 		break;
863 	}
864 
865 	if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
866 		dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
867 		ret = -ENOMEM;
868 		goto failed_magic;
869 	}
870 
871 	card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev,
872 			PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL);
873 	card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev,
874 			PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL);
875 	if (card->mm_pages[0].desc == NULL ||
876 	    card->mm_pages[1].desc == NULL) {
877 		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
878 		goto failed_alloc;
879 	}
880 	reset_page(&card->mm_pages[0]);
881 	reset_page(&card->mm_pages[1]);
882 	card->Ready = 0;	/* page 0 is ready */
883 	card->Active = -1;	/* no page is active */
884 	card->bio = NULL;
885 	card->biotail = &card->bio;
886 	spin_lock_init(&card->lock);
887 
888 	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
889 	if (!card->queue)
890 		goto failed_alloc;
891 
892 	blk_queue_make_request(card->queue, mm_make_request);
893 	card->queue->queuedata = card;
894 
895 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
896 
897 	card->check_batteries = 0;
898 
899 	mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
900 	switch (mem_present) {
901 	case MEM_128_MB:
902 		card->mm_size = 1024 * 128;
903 		break;
904 	case MEM_256_MB:
905 		card->mm_size = 1024 * 256;
906 		break;
907 	case MEM_512_MB:
908 		card->mm_size = 1024 * 512;
909 		break;
910 	case MEM_1_GB:
911 		card->mm_size = 1024 * 1024;
912 		break;
913 	case MEM_2_GB:
914 		card->mm_size = 1024 * 2048;
915 		break;
916 	default:
917 		card->mm_size = 0;
918 		break;
919 	}
920 
921 	/* Clear the LEDs we control */
922 	set_led(card, LED_REMOVE, LED_OFF);
923 	set_led(card, LED_FAULT, LED_OFF);
924 
925 	batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
926 
927 	card->battery[0].good = !(batt_status & BATTERY_1_FAILURE);
928 	card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
929 	card->battery[0].last_change = card->battery[1].last_change = jiffies;
930 
931 	if (card->flags & UM_FLAG_NO_BATT)
932 		dev_printk(KERN_INFO, &card->dev->dev,
933 			"Size %d KB\n", card->mm_size);
934 	else {
935 		dev_printk(KERN_INFO, &card->dev->dev,
936 			"Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
937 		       card->mm_size,
938 		       batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled",
939 		       card->battery[0].good ? "OK" : "FAILURE",
940 		       batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled",
941 		       card->battery[1].good ? "OK" : "FAILURE");
942 
943 		set_fault_to_battery_status(card);
944 	}
945 
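	/*
	 * Standard PCI BAR sizing probe: save BAR1, write all ones, read
	 * back and restore; masking the low flag bits and negating the
	 * result gives the size of the memory window reported below.
	 */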
946 	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar);
947 	data = 0xffffffff;
948 	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data);
949 	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data);
950 	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar);
951 	data &= 0xfffffff0;
952 	data = ~data;
953 	data += 1;
954 
955 	if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME,
956 			card)) {
957 		dev_printk(KERN_ERR, &card->dev->dev,
958 			"Unable to allocate IRQ\n");
959 		ret = -ENODEV;
960 		goto failed_req_irq;
961 	}
962 
963 	dev_printk(KERN_INFO, &card->dev->dev,
964 		"Window size %d bytes, IRQ %d\n", data, dev->irq);
965 
966 	pci_set_drvdata(dev, card);
967 
968 	if (pci_write_cmd != 0x0F) 	/* If not Memory Write & Invalidate */
969 		pci_write_cmd = 0x07;	/* then Memory Write command */
970 
971 	if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */
972 		unsigned short cfg_command;
973 		pci_read_config_word(dev, PCI_COMMAND, &cfg_command);
974 		cfg_command |= 0x10; /* Memory Write & Invalidate Enable */
975 		pci_write_config_word(dev, PCI_COMMAND, cfg_command);
976 	}
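	/* pci_cmds ends up in bits 31:28 (read command) and 27:24 (write
	 * command) of each descriptor's DMA control word (see add_bio()).
	 */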
977 	pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24);
978 
979 	num_cards++;
980 
981 	if (!get_userbit(card, MEMORY_INITIALIZED)) {
982 		dev_printk(KERN_INFO, &card->dev->dev,
983 		  "memory NOT initialized. Consider over-writing whole device.\n");
984 		card->init_size = 0;
985 	} else {
986 		dev_printk(KERN_INFO, &card->dev->dev,
987 			"memory already initialized\n");
988 		card->init_size = card->mm_size;
989 	}
990 
991 	/* Enable ECC */
992 	writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL);
993 
994 	return 0;
995 
996  failed_req_irq:
997  failed_alloc:
998 	if (card->mm_pages[0].desc)
999 		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
1000 				  card->mm_pages[0].desc,
1001 				  card->mm_pages[0].page_dma);
1002 	if (card->mm_pages[1].desc)
1003 		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
1004 				  card->mm_pages[1].desc,
1005 				  card->mm_pages[1].page_dma);
1006  failed_magic:
1007 	iounmap(card->csr_remap);
1008  failed_remap_csr:
1009 	pci_release_regions(dev);
1010  failed_req_csr:
1011 
1012 	return ret;
1013 }
1014 
1015 static void mm_pci_remove(struct pci_dev *dev)
1016 {
1017 	struct cardinfo *card = pci_get_drvdata(dev);
1018 
1019 	tasklet_kill(&card->tasklet);
1020 	free_irq(dev->irq, card);
1021 	iounmap(card->csr_remap);
1022 
1023 	if (card->mm_pages[0].desc)
1024 		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
1025 				    card->mm_pages[0].desc,
1026 				    card->mm_pages[0].page_dma);
1027 	if (card->mm_pages[1].desc)
1028 		dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
1029 				    card->mm_pages[1].desc,
1030 				    card->mm_pages[1].page_dma);
1031 	blk_cleanup_queue(card->queue);
1032 
1033 	pci_release_regions(dev);
1034 	pci_disable_device(dev);
1035 }
1036 
1037 static const struct pci_device_id mm_pci_ids[] = {
1038     {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)},
1039     {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)},
1040     {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)},
1041     {
1042 	.vendor	=	0x8086,
1043 	.device	=	0xB555,
1044 	.subvendor =	0x1332,
1045 	.subdevice =	0x5460,
1046 	.class =	0x050000,
1047 	.class_mask =	0,
1048     }, { /* end: all zeroes */ }
1049 };
1050 
1051 MODULE_DEVICE_TABLE(pci, mm_pci_ids);
1052 
1053 static struct pci_driver mm_pci_driver = {
1054 	.name		= DRIVER_NAME,
1055 	.id_table	= mm_pci_ids,
1056 	.probe		= mm_pci_probe,
1057 	.remove		= mm_pci_remove,
1058 };
1059 
1060 static int __init mm_init(void)
1061 {
1062 	int retval, i;
1063 	int err;
1064 
1065 	retval = pci_register_driver(&mm_pci_driver);
1066 	if (retval)
1067 		return -ENOMEM;
1068 
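	/* major 0 asks the block layer for a dynamically allocated major */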
1069 	err = major_nr = register_blkdev(0, DRIVER_NAME);
1070 	if (err < 0) {
1071 		pci_unregister_driver(&mm_pci_driver);
1072 		return -EIO;
1073 	}
1074 
1075 	for (i = 0; i < num_cards; i++) {
1076 		mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
1077 		if (!mm_gendisk[i])
1078 			goto out;
1079 	}
1080 
1081 	for (i = 0; i < num_cards; i++) {
1082 		struct gendisk *disk = mm_gendisk[i];
1083 		sprintf(disk->disk_name, "umem%c", 'a'+i);
1084 		spin_lock_init(&cards[i].lock);
1085 		disk->major = major_nr;
1086 		disk->first_minor  = i << MM_SHIFT;
1087 		disk->fops = &mm_fops;
1088 		disk->private_data = &cards[i];
1089 		disk->queue = cards[i].queue;
1090 		set_capacity(disk, cards[i].mm_size << 1);
1091 		add_disk(disk);
1092 	}
1093 
1094 	init_battery_timer();
1095 	printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
1096 /* printk("mm_init: Done. 10-19-01 9:00\n"); */
1097 	return 0;
1098 
1099 out:
1100 	pci_unregister_driver(&mm_pci_driver);
1101 	unregister_blkdev(major_nr, DRIVER_NAME);
1102 	while (i--)
1103 		put_disk(mm_gendisk[i]);
1104 	return -ENOMEM;
1105 }
1106 
1107 static void __exit mm_cleanup(void)
1108 {
1109 	int i;
1110 
1111 	del_battery_timer();
1112 
1113 	for (i = 0; i < num_cards ; i++) {
1114 		del_gendisk(mm_gendisk[i]);
1115 		put_disk(mm_gendisk[i]);
1116 	}
1117 
1118 	pci_unregister_driver(&mm_pci_driver);
1119 
1120 	unregister_blkdev(major_nr, DRIVER_NAME);
1121 }
1122 
1123 module_init(mm_init);
1124 module_exit(mm_cleanup);
1125 
1126 MODULE_AUTHOR(DRIVER_AUTHOR);
1127 MODULE_DESCRIPTION(DRIVER_DESC);
1128 MODULE_LICENSE("GPL");
1129