/*
 * comedi/drivers/mite.c
 * Hardware driver for NI Mite PCI interface chip
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The PCI-MIO E series driver was originally written by
 * Tomasz Motylewski <...>, and ported to comedi by ds.
 *
 * References for specifications:
 *
 * 321747b.pdf Register Level Programmer Manual (obsolete)
 * 321747c.pdf Register Level Programmer Manual (new)
 * DAQ-STC reference manual
 *
 * Other possibly relevant info:
 *
 * 320517c.pdf User manual (obsolete)
 * 320517f.pdf User manual (new)
 * 320889a.pdf delete
 * 320906c.pdf maximum signal ratings
 * 321066a.pdf about 16x
 * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
 * 321808a.pdf about at-mio-16e-10 rev P
 * 321837a.pdf discontinuation of at-mio-16de-10 rev d
 * 321838a.pdf about at-mio-16de-10 rev N
 *
 * ISSUES:
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "../comedi_pci.h"

#include "mite.h"

/*
 * Mite registers
 */
#define MITE_UNKNOWN_DMA_BURST_REG 0x28
#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600

#define MITE_PCI_CONFIG_OFFSET 0x300
#define MITE_CSIGR 0x460 /* chip signature */
#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)

#define MITE_CHAN(x) (0x500 + 0x100 * (x))
#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
#define CHOR_DMARESET BIT(31)
#define CHOR_SET_SEND_TC BIT(11)
#define CHOR_CLR_SEND_TC BIT(10)
#define CHOR_SET_LPAUSE BIT(9)
#define CHOR_CLR_LPAUSE BIT(8)
#define CHOR_CLRDONE BIT(7)
#define CHOR_CLRRB BIT(6)
#define CHOR_CLRLC BIT(5)
#define CHOR_FRESET BIT(4)
#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
#define CHOR_STOP BIT(2) /* stop after emptying fifo */
#define CHOR_CONT BIT(1)
#define CHOR_START BIT(0)
#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
#define CHCR_SET_DMA_IE BIT(31)
#define CHCR_CLR_DMA_IE BIT(30)
#define CHCR_SET_LINKP_IE BIT(29)
#define CHCR_CLR_LINKP_IE BIT(28)
#define CHCR_SET_SAR_IE BIT(27)
#define CHCR_CLR_SAR_IE BIT(26)
#define CHCR_SET_DONE_IE BIT(25)
#define CHCR_CLR_DONE_IE BIT(24)
#define CHCR_SET_MRDY_IE BIT(23)
#define CHCR_CLR_MRDY_IE BIT(22)
#define CHCR_SET_DRDY_IE BIT(21)
#define CHCR_CLR_DRDY_IE BIT(20)
#define CHCR_SET_LC_IE BIT(19)
#define CHCR_CLR_LC_IE BIT(18)
#define CHCR_SET_CONT_RB_IE BIT(17)
#define CHCR_CLR_CONT_RB_IE BIT(16)
#define CHCR_FIFO(x) (((x) & 0x1) << 15)
#define CHCR_FIFODIS CHCR_FIFO(1)
#define CHCR_FIFO_ON CHCR_FIFO(0)
#define CHCR_BURST(x) (((x) & 0x1) << 14)
#define CHCR_BURSTEN CHCR_BURST(1)
#define CHCR_NO_BURSTEN CHCR_BURST(0)
#define CHCR_BYTE_SWAP_DEVICE BIT(6)
#define CHCR_BYTE_SWAP_MEMORY BIT(4)
#define CHCR_DIR(x) (((x) & 0x1) << 3)
#define CHCR_DEV_TO_MEM CHCR_DIR(1)
#define CHCR_MEM_TO_DEV CHCR_DIR(0)
#define CHCR_MODE(x) (((x) & 0x7) << 0)
#define CHCR_NORMAL CHCR_MODE(0)
#define CHCR_CONTINUE CHCR_MODE(1)
#define CHCR_RINGBUFF CHCR_MODE(2)
#define CHCR_LINKSHORT CHCR_MODE(4)
#define CHCR_LINKLONG CHCR_MODE(5)
#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
#define DCR_NORMAL BIT(29)
#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
#define CHSR_INT BIT(31)
#define CHSR_LPAUSES BIT(29)
#define CHSR_SARS BIT(27)
#define CHSR_DONE BIT(25)
#define CHSR_MRDY BIT(23)
#define CHSR_DRDY BIT(21)
#define CHSR_LINKC BIT(19)
#define CHSR_CONTS_RB BIT(17)
#define CHSR_ERROR BIT(15)
#define CHSR_SABORT BIT(14)
#define CHSR_HABORT BIT(13)
#define CHSR_STOPS BIT(12)
#define CHSR_OPERR(x) (((x) & 0x3) << 10)
#define CHSR_OPERR_MASK CHSR_OPERR(3)
#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
#define CHSR_XFERR BIT(9)
#define CHSR_END BIT(8)
#define CHSR_DRQ1 BIT(7)
#define CHSR_DRQ0 BIT(6)
#define CHSR_LERR(x) (((x) & 0x3) << 4)
#define CHSR_LERR_MASK CHSR_LERR(3)
#define CHSR_LBERR CHSR_LERR(1)
#define CHSR_LRERR CHSR_LERR(2)
#define CHSR_LOERR CHSR_LERR(3)
#define CHSR_MERR(x) (((x) & 0x3) << 2)
#define CHSR_MERR_MASK CHSR_MERR(3)
#define CHSR_MBERR CHSR_MERR(1)
#define CHSR_MRERR CHSR_MERR(2)
#define CHSR_MOERR CHSR_MERR(3)
#define CHSR_DERR(x) (((x) & 0x3) << 0)
#define CHSR_DERR_MASK CHSR_DERR(3)
#define CHSR_DBERR CHSR_DERR(1)
#define CHSR_DRERR CHSR_DERR(2)
#define CHSR_DOERR CHSR_DERR(3)
#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */

/* common bits for the memory/device/link config registers */
#define CR_RL(x) (((x) & 0x7) << 21)
#define CR_REQS(x) (((x) & 0x7) << 16)
#define CR_REQS_MASK CR_REQS(7)
#define CR_ASEQ(x) (((x) & 0x3) << 10)
#define CR_ASEQDONT CR_ASEQ(0)
#define CR_ASEQUP CR_ASEQ(1)
#define CR_ASEQDOWN CR_ASEQ(2)
#define CR_ASEQ_MASK CR_ASEQ(3)
#define CR_PSIZE(x) (((x) & 0x3) << 8)
#define CR_PSIZE8 CR_PSIZE(1)
#define CR_PSIZE16 CR_PSIZE(2)
#define CR_PSIZE32 CR_PSIZE(3)
#define CR_PORT(x) (((x) & 0x3) << 6)
#define CR_PORTCPU CR_PORT(0)
#define CR_PORTIO CR_PORT(1)
#define CR_PORTVXI CR_PORT(2)
#define CR_PORTMXI CR_PORT(3)
#define CR_AMDEVICE BIT(0)

static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
{
	return (ilog2(size) - 1) & 0x1f;
}

static unsigned int mite_retry_limit(unsigned int retry_limit)
{
	unsigned int value = 0;

	if (retry_limit)
		value = 1 + ilog2(retry_limit);
	if (value > 0x7)
		value = 0x7;
	return CR_RL(value);
}

static unsigned int mite_drq_reqs(unsigned int drq_line)
{
	/* This also works on m-series when using channels (drq_line) 4 or 5. */
	return CR_REQS((drq_line & 0x3) | 0x4);
}

static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
	unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
	unsigned int empty_count = (fcr_bits >> 16) & 0xff;
	unsigned int full_count = fcr_bits & 0xff;

	return empty_count + full_count;
}

static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	return readl(mite->mmio + MITE_DAR(mite_chan->channel));
}

/**
 * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
 * @mite_chan: MITE dma channel.
 */
u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);

/* returns lower bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count - mite_bytes_in_transit(mite_chan);
}

/* returns upper bound for number of bytes transferred from device to memory */
static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}

/* returns lower bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count + mite_bytes_in_transit(mite_chan);
}

/* returns upper bound for number of bytes read from memory to device */
static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}

static void mite_sync_input_dma(struct mite_channel *mite_chan,
				struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	int count;
	unsigned int nbytes, old_alloc_count;

	old_alloc_count = async->buf_write_alloc_count;
	/* write alloc as much as we can */
	comedi_buf_write_alloc(s, async->prealloc_bufsz);

	nbytes = mite_bytes_written_to_memory_lb(mite_chan);
	if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
		  old_alloc_count) > 0) {
		dev_warn(s->device->class_dev,
			 "mite: DMA overwrite of free area\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return;
	}

	count = nbytes - async->buf_write_count;
	/*
	 * it's possible count will be negative due to conservative value
	 * returned by mite_bytes_written_to_memory_lb
	 */
	if (count > 0) {
		comedi_buf_write_free(s, count);
		comedi_inc_scan_progress(s, count);
		async->events |= COMEDI_CB_BLOCK;
	}
}

static void mite_sync_output_dma(struct mite_channel *mite_chan,
				 struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
	unsigned int old_alloc_count = async->buf_read_alloc_count;
	u32 nbytes_ub, nbytes_lb;
	int count;
	bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);

	/* read alloc as much as we can */
	comedi_buf_read_alloc(s, async->prealloc_bufsz);
	nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
		nbytes_lb = stop_count;
	nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
	if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
		nbytes_ub = stop_count;

	if ((!finite_regen || stop_count > old_alloc_count) &&
	    ((int)(nbytes_ub - old_alloc_count) > 0)) {
		dev_warn(s->device->class_dev, "mite: DMA underrun\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return;
	}

	if (finite_regen) {
		/*
		 * This is a special case where we continuously output a finite
		 * buffer. In this case, we do not free any of the memory,
		 * hence we expect that old_alloc_count will reach a maximum of
		 * stop_count bytes.
		 */
		return;
	}

	count = nbytes_lb - async->buf_read_count;
	if (count > 0) {
		comedi_buf_read_free(s, count);
		async->events |= COMEDI_CB_BLOCK;
	}
}

/**
 * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
 * @mite_chan: MITE dma channel.
 * @s: COMEDI subdevice.
 */
void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
{
	if (mite_chan->dir == COMEDI_INPUT)
		mite_sync_input_dma(mite_chan, s);
	else
		mite_sync_output_dma(mite_chan, s);
}
EXPORT_SYMBOL_GPL(mite_sync_dma);

static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		writel(CHOR_CLRDONE,
		       mite->mmio + MITE_CHOR(mite_chan->channel));
	}
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}

/**
 * mite_ack_linkc() - Check and ack the LINKC interrupt.
 * @mite_chan: MITE dma channel.
 * @s: COMEDI subdevice.
 * @sync: flag to force a mite_sync_dma().
 *
 * This will also ack the DONE interrupt if active.
 */
void mite_ack_linkc(struct mite_channel *mite_chan,
		    struct comedi_subdevice *s,
		    bool sync)
{
	struct mite *mite = mite_chan->mite;
	unsigned int status;

	status = mite_get_status(mite_chan);
	if (status & CHSR_LINKC) {
		writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
		sync = true;
	}
	if (sync)
		mite_sync_dma(mite_chan, s);

	if (status & CHSR_XFERR) {
		dev_err(s->device->class_dev,
			"mite: transfer error %08x\n", status);
		s->async->events |= COMEDI_CB_ERROR;
	}
}
EXPORT_SYMBOL_GPL(mite_ack_linkc);
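
/*
 * Illustrative sketch (not part of this file): a board driver's interrupt
 * handler typically checks its own interrupt status, lets mite_ack_linkc()
 * ack LINKC/DONE and sync the COMEDI buffer, then dispatches the queued
 * events. The handler name and devpriv fields below are hypothetical
 * placeholders.
 *
 *	static irqreturn_t my_board_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct my_board_private *devpriv = dev->private;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *
 *		if (devpriv->ai_mite_chan)
 *			mite_ack_linkc(devpriv->ai_mite_chan, s, false);
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */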

/**
 * mite_done() - Check if a MITE dma transfer is complete.
 * @mite_chan: MITE dma channel.
 *
 * This will also ack the DONE interrupt if active.
 */
int mite_done(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;
	int done;

	mite_get_status(mite_chan);
	spin_lock_irqsave(&mite->lock, flags);
	done = mite_chan->done;
	spin_unlock_irqrestore(&mite->lock, flags);
	return done;
}
EXPORT_SYMBOL_GPL(mite_done);

static void mite_dma_reset(struct mite_channel *mite_chan)
{
	writel(CHOR_DMARESET | CHOR_FRESET,
	       mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
}

/**
 * mite_dma_arm() - Start a MITE dma transfer.
 * @mite_chan: MITE dma channel.
 */
void mite_dma_arm(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;

	/*
	 * memory barrier is intended to ensure any twiddling with the buffer
	 * is done before writing to the mite to arm dma transfer
	 */
	smp_mb();
	spin_lock_irqsave(&mite->lock, flags);
	mite_chan->done = 0;
	/* arm */
	writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);

/**
 * mite_dma_disarm() - Stop a MITE dma transfer.
 * @mite_chan: MITE dma channel.
 */
void mite_dma_disarm(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;

	/* disarm */
	writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);

/**
 * mite_prep_dma() - Prepare a MITE dma channel for transfers.
 * @mite_chan: MITE dma channel.
 * @num_device_bits: device transfer size (8, 16, or 32-bits).
 * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
 */
void mite_prep_dma(struct mite_channel *mite_chan,
		   unsigned int num_device_bits, unsigned int num_memory_bits)
{
	struct mite *mite = mite_chan->mite;
	unsigned int chcr, mcr, dcr, lkcr;

	mite_dma_reset(mite_chan);

	/* short link chaining mode */
	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
	       CHCR_BURSTEN;
	/*
	 * Link Complete Interrupt: interrupt every time a link
	 * in MITE_RING is completed. This can generate a lot of
	 * extra interrupts, but right now we update the values
	 * of buf_int_ptr and buf_int_count at each interrupt. A
	 * better method is to poll the MITE before each user
	 * "read()" to calculate the number of bytes available.
	 */
	chcr |= CHCR_SET_LC_IE;
	if (num_memory_bits == 32 && num_device_bits == 16) {
		/*
		 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
		 * samples into the fifo in the right order. Tested doing 32 bit
		 * memory to 16 bit device transfers to the analog out of a
		 * pxi-6281, which has mite version = 1, type = 4. This also
		 * works for dma reads from the counters on e-series boards.
		 */
		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
	}
	if (mite_chan->dir == COMEDI_INPUT)
		chcr |= CHCR_DEV_TO_MEM;

	writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));

	/* to/from memory */
	mcr = mite_retry_limit(64) | CR_ASEQUP;
	switch (num_memory_bits) {
	case 8:
		mcr |= CR_PSIZE8;
		break;
	case 16:
		mcr |= CR_PSIZE16;
		break;
	case 32:
		mcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid mem bit width for dma transfer\n");
		break;
	}
	writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));

	/* from/to device */
	dcr = mite_retry_limit(64) | CR_ASEQUP;
	dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
	switch (num_device_bits) {
	case 8:
		dcr |= CR_PSIZE8;
		break;
	case 16:
		dcr |= CR_PSIZE16;
		break;
	case 32:
		dcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid dev bit width for dma transfer\n");
		break;
	}
	writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));

	/* reset the DAR */
	writel(0, mite->mmio + MITE_DAR(mite_chan->channel));

	/* the link is 32bits */
	lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
	writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));

	/* starting address for link chaining */
	writel(mite_chan->ring->dma_addr,
	       mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);

static struct mite_channel *__mite_request_channel(struct mite *mite,
						   struct mite_ring *ring,
						   unsigned int min_channel,
						   unsigned int max_channel)
{
	struct mite_channel *mite_chan = NULL;
	unsigned long flags;
	int i;

	/*
	 * spin lock so mite_release_channel can be called safely
	 * from interrupts
	 */
	spin_lock_irqsave(&mite->lock, flags);
	for (i = min_channel; i <= max_channel; ++i) {
		mite_chan = &mite->channels[i];
		if (!mite_chan->ring) {
			mite_chan->ring = ring;
			break;
		}
		mite_chan = NULL;
	}
	spin_unlock_irqrestore(&mite->lock, flags);
	return mite_chan;
}

/**
 * mite_request_channel_in_range() - Request a MITE dma channel.
 * @mite: MITE device.
 * @ring: MITE dma ring.
 * @min_channel: minimum channel index to use.
 * @max_channel: maximum channel index to use.
 */
struct mite_channel *mite_request_channel_in_range(struct mite *mite,
						   struct mite_ring *ring,
						   unsigned int min_channel,
						   unsigned int max_channel)
{
	return __mite_request_channel(mite, ring, min_channel, max_channel);
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);

/**
 * mite_request_channel() - Request a MITE dma channel.
 * @mite: MITE device.
 * @ring: MITE dma ring.
 */
struct mite_channel *mite_request_channel(struct mite *mite,
					  struct mite_ring *ring)
{
	return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
}
EXPORT_SYMBOL_GPL(mite_request_channel);
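
/*
 * Illustrative sketch (not part of this file): the usual life cycle of a
 * dma channel as seen from a board driver. The devpriv fields are
 * hypothetical, and real drivers also program their own DMA select/enable
 * registers between these calls.
 *
 *	devpriv->ai_mite_chan = mite_request_channel(devpriv->mite,
 *						     devpriv->ai_mite_ring);
 *	if (!devpriv->ai_mite_chan)
 *		return -EBUSY;
 *	devpriv->ai_mite_chan->dir = COMEDI_INPUT;
 *	mite_prep_dma(devpriv->ai_mite_chan, 16, 32);
 *	mite_dma_arm(devpriv->ai_mite_chan);
 *	...
 *	mite_dma_disarm(devpriv->ai_mite_chan);
 *	mite_sync_dma(devpriv->ai_mite_chan, s);
 *	mite_release_channel(devpriv->ai_mite_chan);
 *	devpriv->ai_mite_chan = NULL;
 */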

/**
 * mite_release_channel() - Release a MITE dma channel.
 * @mite_chan: MITE dma channel.
 */
void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite *mite = mite_chan->mite;
	unsigned long flags;

	/* spin lock to prevent races with mite_request_channel */
	spin_lock_irqsave(&mite->lock, flags);
	if (mite_chan->ring) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/*
		 * disable all channel's interrupts (do it after disarm/reset so
		 * MITE_CHCR reg isn't changed while dma is still active!)
		 */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mmio + MITE_CHCR(mite_chan->channel));
		mite_chan->ring = NULL;
		mmiowb();
	}
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);

/**
 * mite_init_ring_descriptors() - Initialize a MITE dma ring's descriptors.
 * @ring: MITE dma ring.
 * @s: COMEDI subdevice.
 * @nbytes: the size of the dma ring (in bytes).
 *
 * Initializes the ring buffer descriptors to provide correct DMA transfer
 * links to the exact amount of memory required. When the ring buffer is
 * allocated by mite_buf_change(), the default is to initialize the ring
 * to refer to the entire DMA data buffer. A command may call this function
 * later to re-initialize and shorten the amount of memory that will be
 * transferred.
 */
int mite_init_ring_descriptors(struct mite_ring *ring,
			       struct comedi_subdevice *s,
			       unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	struct mite_dma_desc *desc = NULL;
	unsigned int n_full_links = nbytes >> PAGE_SHIFT;
	unsigned int remainder = nbytes % PAGE_SIZE;
	int i;

	dev_dbg(s->device->class_dev,
		"mite: init ring buffer to %u bytes\n", nbytes);

	if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
		dev_err(s->device->class_dev,
			"mite: ring buffer too small for requested init\n");
		return -ENOMEM;
	}

	/* We set the descriptors for all full links. */
	for (i = 0; i < n_full_links; ++i) {
		desc = &ring->descs[i];
		desc->count = cpu_to_le32(PAGE_SIZE);
		desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
		desc->next = cpu_to_le32(ring->dma_addr +
					 (i + 1) * sizeof(*desc));
	}

	/* the last link is either a remainder or was a full link. */
	if (remainder > 0) {
		desc = &ring->descs[i];
		/* set the lesser count for the remainder link */
		desc->count = cpu_to_le32(remainder);
		desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
	}

	/* Assign the last link->next to point back to the head of the list. */
	desc->next = cpu_to_le32(ring->dma_addr);

	/*
	 * barrier is meant to ensure that all the writes to the dma
	 * descriptors have completed before the dma controller is commanded
	 * to read them
	 */
	smp_wmb();
	return 0;
}
EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
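
/*
 * Illustrative sketch (not part of this file): a finite output command can
 * shrink the ring so the MITE only cycles over the exact number of bytes to
 * be (re)generated. The ring pointer and error handling below are
 * hypothetical.
 *
 *	unsigned int nbytes = cmd->stop_arg * comedi_bytes_per_scan(s);
 *	int ret = mite_init_ring_descriptors(devpriv->ao_mite_ring, s, nbytes);
 *
 *	if (ret < 0)
 *		return ret;
 */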

static void mite_free_dma_descs(struct mite_ring *ring)
{
	struct mite_dma_desc *descs = ring->descs;

	if (descs) {
		dma_free_coherent(ring->hw_dev,
				  ring->n_links * sizeof(*descs),
				  descs, ring->dma_addr);
		ring->descs = NULL;
		ring->dma_addr = 0;
		ring->n_links = 0;
	}
}

/**
 * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
 * @ring: MITE dma ring.
 * @s: COMEDI subdevice.
 */
int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct mite_dma_desc *descs;
	unsigned int n_links;

	mite_free_dma_descs(ring);

	if (async->prealloc_bufsz == 0)
		return 0;

	n_links = async->prealloc_bufsz >> PAGE_SHIFT;

	descs = dma_alloc_coherent(ring->hw_dev,
				   n_links * sizeof(*descs),
				   &ring->dma_addr, GFP_KERNEL);
	if (!descs) {
		dev_err(s->device->class_dev,
			"mite: ring buffer allocation failed\n");
		return -ENOMEM;
	}
	ring->descs = descs;
	ring->n_links = n_links;

	return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(mite_buf_change);
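
/*
 * Illustrative sketch (not part of this file): mite_buf_change() is meant to
 * be called from a subdevice's (*buf_change) handler so the descriptor ring
 * tracks the size of the COMEDI data buffer. The driver names below are
 * hypothetical.
 *
 *	static int my_board_ai_buf_change(struct comedi_device *dev,
 *					  struct comedi_subdevice *s)
 *	{
 *		struct my_board_private *devpriv = dev->private;
 *
 *		return mite_buf_change(devpriv->ai_mite_ring, s);
 *	}
 */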

/**
 * mite_alloc_ring() - Allocate a MITE dma ring.
 * @mite: MITE device.
 */
struct mite_ring *mite_alloc_ring(struct mite *mite)
{
	struct mite_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->hw_dev = get_device(&mite->pcidev->dev);
	if (!ring->hw_dev) {
		kfree(ring);
		return NULL;
	}
	ring->n_links = 0;
	ring->descs = NULL;
	ring->dma_addr = 0;
	return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);

/**
 * mite_free_ring() - Free a MITE dma ring and its descriptors.
 * @ring: MITE dma ring.
 */
void mite_free_ring(struct mite_ring *ring)
{
	if (ring) {
		mite_free_dma_descs(ring);
		put_device(ring->hw_dev);
		kfree(ring);
	}
}
EXPORT_SYMBOL_GPL(mite_free_ring);

static int mite_setup(struct comedi_device *dev, struct mite *mite,
		      bool use_win1)
{
	resource_size_t daq_phys_addr;
	unsigned long length;
	int i;
	u32 csigr_bits;
	unsigned int unknown_dma_burst_bits;
	unsigned int wpdep;

	pci_set_master(mite->pcidev);

	mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
	if (!mite->mmio)
		return -ENOMEM;

	dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
	if (!dev->mmio)
		return -ENOMEM;
	daq_phys_addr = pci_resource_start(mite->pcidev, 1);
	length = pci_resource_len(mite->pcidev, 1);

	if (use_win1) {
		writel(0, mite->mmio + MITE_IODWBSR);
		dev_dbg(dev->class_dev,
			"mite: using I/O Window Base Size register 1\n");
		writel(daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mmio + MITE_IODWBSR_1);
		writel(0, mite->mmio + MITE_IODWCR_1);
	} else {
		writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
	}
	/*
	 * Make sure dma bursts work. I got this from running a bus analyzer
	 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
	 * of 0x61f and bursts worked. 6281 powered up with register value of
	 * 0x1f and bursts didn't work. The NI windows driver reads the
	 * register, then does a bitwise-or of 0x600 with it and writes it back.
	 *
	 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
	 * written and read back. The bits 0x1f always read as 1.
	 * The rest always read as zero.
	 */
	unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);

	csigr_bits = readl(mite->mmio + MITE_CSIGR);
	mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		dev_warn(dev->class_dev,
			 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
			 mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}

	/* get the wpdep bits and convert them to the write post fifo depth */
	wpdep = CSIGR_TO_WPDEP(csigr_bits);
	if (wpdep)
		wpdep = BIT(wpdep);

	dev_dbg(dev->class_dev,
		"mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
		CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
		CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
	dev_dbg(dev->class_dev,
		"mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
		CSIGR_TO_DMAC(csigr_bits), wpdep,
		CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));

	for (i = 0; i < mite->num_channels; i++) {
		writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
		/* disable interrupts */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mmio + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
	return 0;
}

/**
 * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
 * @dev: COMEDI device.
 * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
 *
 * Called by a COMEDI driver's (*auto_attach).
 *
 * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
 * be allocated or remapped.
 */
struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	struct mite *mite;
	unsigned int i;
	int ret;

	mite = kzalloc(sizeof(*mite), GFP_KERNEL);
	if (!mite)
		return NULL;

	spin_lock_init(&mite->lock);
	mite->pcidev = pcidev;
	for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
		mite->channels[i].mite = mite;
		mite->channels[i].channel = i;
		mite->channels[i].done = 1;
	}

	ret = mite_setup(dev, mite, use_win1);
	if (ret) {
		if (mite->mmio)
			iounmap(mite->mmio);
		kfree(mite);
		return NULL;
	}

	return mite;
}
EXPORT_SYMBOL_GPL(mite_attach);
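
/*
 * Illustrative sketch (not part of this file): in a PCI board driver the
 * MITE is normally brought up from (*auto_attach) and torn down from
 * (*detach). The devpriv structure is hypothetical, and whether use_win1
 * is set depends on the board family.
 *
 *	devpriv->mite = mite_attach(dev, false);
 *	if (!devpriv->mite)
 *		return -ENOMEM;
 *	...
 *	mite_detach(devpriv->mite);
 *	devpriv->mite = NULL;
 */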

/**
 * mite_detach() - Unmap and free a MITE device for a comedi driver.
 * @mite: MITE device.
 *
 * Called by a COMEDI driver's (*detach).
 */
void mite_detach(struct mite *mite)
{
	if (!mite)
		return;

	if (mite->mmio)
		iounmap(mite->mmio);

	kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);

static int __init mite_module_init(void)
{
	return 0;
}
module_init(mite_module_init);

static void __exit mite_module_exit(void)
{
}
module_exit(mite_module_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
MODULE_LICENSE("GPL");