/*
 *  Omnitek Scatter-Gather DMA Controller
 *
 *  Copyright 2012-2015 Cisco Systems, Inc. and/or its affiliates.
 *  All rights reserved.
 *
 *  This program is free software; you may redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 *  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 *  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 *  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *  SOFTWARE.
 */

#include <linux/string.h>
#include <linux/io.h>
#include <linux/pci_regs.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include "cobalt-driver.h"
#include "cobalt-omnitek.h"

/* descriptor */
#define END_OF_CHAIN		(1 << 1)
#define INTERRUPT_ENABLE	(1 << 2)
#define WRITE_TO_PCI		(1 << 3)
#define READ_FROM_PCI		(0 << 3)
#define DESCRIPTOR_FLAG_MSK	(END_OF_CHAIN | INTERRUPT_ENABLE | WRITE_TO_PCI)
#define NEXT_ADRS_MSK		0xffffffe0

/* control/status register */
#define ENABLE			(1 << 0)
#define START			(1 << 1)
#define ABORT			(1 << 2)
#define DONE			(1 << 4)
#define SG_INTERRUPT		(1 << 5)
#define EVENT_INTERRUPT		(1 << 6)
#define SCATTER_GATHER_MODE	(1 << 8)
#define DISABLE_VIDEO_RESYNC	(1 << 9)
#define EVENT_INTERRUPT_ENABLE	(1 << 10)
#define DIRECTIONAL_MSK		(3 << 16)
#define INPUT_ONLY		(0 << 16)
#define OUTPUT_ONLY		(1 << 16)
#define BIDIRECTIONAL		(2 << 16)
#define DMA_TYPE_MEMORY		(0 << 18)
#define DMA_TYPE_FIFO		(1 << 18)

#define BASE			(cobalt->bar0)
#define CAPABILITY_HEADER	(BASE)
#define CAPABILITY_REGISTER	(BASE + 0x04)
#define PCI_64BIT		(1 << 8)
#define LOCAL_64BIT		(1 << 9)
#define INTERRUPT_STATUS	(BASE + 0x08)
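/* Each channel owns a 0x40-byte register window starting at BASE + 0x40 */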
#define PCI(c)			(BASE + 0x40 + ((c) * 0x40))
#define SIZE(c)			(BASE + 0x58 + ((c) * 0x40))
#define DESCRIPTOR(c)		(BASE + 0x50 + ((c) * 0x40))
#define CS_REG(c)		(BASE + 0x60 + ((c) * 0x40))
#define BYTES_TRANSFERRED(c)	(BASE + 0x64 + ((c) * 0x40))

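/* Decode the direction bits of a channel's control/status word for logging */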
static const char *get_dma_direction(u32 status)
{
	switch (status & DIRECTIONAL_MSK) {
	case INPUT_ONLY: return "Input";
	case OUTPUT_ONLY: return "Output";
	case BIDIRECTIONAL: return "Bidirectional";
	}
	return "";
}

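/*
 * Log the Omnitek DMA capability registers: the header fields, the
 * PCIe/local bus width, and the type and direction of each channel.
 */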
static void show_dma_capability(struct cobalt *cobalt)
{
	u32 header = ioread32(CAPABILITY_HEADER);
	u32 capa = ioread32(CAPABILITY_REGISTER);
	u32 i;

	cobalt_info("Omnitek DMA capability: ID 0x%02x Version 0x%02x Next 0x%x Size 0x%x\n",
		    header & 0xff, (header >> 8) & 0xff,
		    (header >> 16) & 0xffff, (capa >> 24) & 0xff);

	switch ((capa >> 8) & 0x3) {
	case 0:
		cobalt_info("Omnitek DMA: 32 bits PCIe and Local\n");
		break;
	case 1:
		cobalt_info("Omnitek DMA: 64 bits PCIe, 32 bits Local\n");
		break;
	case 3:
		cobalt_info("Omnitek DMA: 64 bits PCIe and Local\n");
		break;
	}

	for (i = 0; i < (capa & 0xf); i++) {
		u32 status = ioread32(CS_REG(i));

		cobalt_info("Omnitek DMA channel #%u: %s %s\n", i,
			    status & DMA_TYPE_FIFO ? "FIFO" : "MEMORY",
			    get_dma_direction(status));
	}
}

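/*
 * Write the bus address of the first descriptor (high word, then low word)
 * and start the channel in scatter-gather mode. The low bits of the
 * descriptor pointer are reserved for flag bits (cf. DESCRIPTOR_FLAG_MSK),
 * hence the NEXT_ADRS_MSK masking of the low word.
 */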
void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc)
{
	struct cobalt *cobalt = s->cobalt;

	iowrite32((u32)((u64)desc->bus >> 32), DESCRIPTOR(s->dma_channel) + 4);
	iowrite32((u32)desc->bus & NEXT_ADRS_MSK, DESCRIPTOR(s->dma_channel));
	iowrite32(ENABLE | SCATTER_GATHER_MODE | START, CS_REG(s->dma_channel));
}

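/* Check the DONE bit in the channel's control/status register */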
bool is_dma_done(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (ioread32(CS_REG(s->dma_channel)) & DONE)
		return true;

	return false;
}

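/* Abort the channel unless the transfer has already completed */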
void omni_sg_dma_abort_channel(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (!is_dma_done(s))
		iowrite32(ABORT, CS_REG(s->dma_channel));
}

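/*
 * Read the capability register to discover the number of DMA channels and
 * the PCIe addressing width, and abort any channel still running from a
 * previous session. first_fifo_channel is derived by counting the memory
 * channels, which assumes memory channels are numbered before FIFO channels.
 */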
int omni_sg_dma_init(struct cobalt *cobalt)
{
	u32 capa = ioread32(CAPABILITY_REGISTER);
	int i;

	cobalt->first_fifo_channel = 0;
	cobalt->dma_channels = capa & 0xf;
	cobalt->pci_32_bit = !(capa & PCI_64BIT);

	for (i = 0; i < cobalt->dma_channels; i++) {
		u32 status = ioread32(CS_REG(i));
		u32 ctrl = ioread32(CS_REG(i));

		if (!(ctrl & DONE))
			iowrite32(ABORT, CS_REG(i));

		if (!(status & DMA_TYPE_FIFO))
			cobalt->first_fifo_channel++;
	}
	show_dma_capability(cobalt);
	return 0;
}

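/*
 * Build the descriptor chain for one buffer by walking the scatterlist.
 * Each line transfers 'width' bytes and then skips ahead to the next
 * 'stride' boundary, so line padding is never transferred; if
 * width == stride the buffer is handled as one contiguous block. A
 * single-entry scatterlist is split in two so there are always at least
 * two descriptors in the chain. The last descriptor initially loops back
 * to the first; callers rewrite that link via descriptor_list_chain() or
 * descriptor_list_end_of_chain(). The 0x11111111/0x22222222 values in
 * 'local' presumably mark frame-sync points for the hardware.
 */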
int descriptor_list_create(struct cobalt *cobalt,
		struct scatterlist *scatter_list, bool to_pci, unsigned sglen,
		unsigned size, unsigned width, unsigned stride,
		struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = (struct sg_dma_descriptor *)desc->virt;
	dma_addr_t next = desc->bus;
	unsigned offset = 0;
	unsigned copy_bytes = width;
	unsigned copied = 0;
	bool first = true;

	/* Must be 4-byte aligned */
	WARN_ON(sg_dma_address(scatter_list) & 3);
	WARN_ON(size & 3);
	WARN_ON(next & 3);
	WARN_ON(stride & 3);
	WARN_ON(stride < width);
	if (width >= stride)
		copy_bytes = stride = size;

	while (size) {
		dma_addr_t addr = sg_dma_address(scatter_list) + offset;
		unsigned bytes;

		if (addr == 0)
			return -EFAULT;
		if (cobalt->pci_32_bit) {
			WARN_ON((u64)addr >> 32);
			if ((u64)addr >> 32)
				return -EFAULT;
		}

		/* PCIe address */
		d->pci_l = addr & 0xffffffff;
		/* If dma_addr_t is 32 bits, then addr >> 32 is actually the
		   equivalent of addr >> 0 in gcc. So must cast to u64. */
		d->pci_h = (u64)addr >> 32;

		/* Sync to start of streaming frame */
		d->local = 0;
		d->reserved0 = 0;

		/* Transfer bytes */
		bytes = min(sg_dma_len(scatter_list) - offset,
				copy_bytes - copied);

		if (first) {
			if (to_pci)
				d->local = 0x11111111;
			first = false;
			if (sglen == 1) {
				/* Make sure there are always at least two
				 * descriptors */
				d->bytes = (bytes / 2) & ~3;
				d->reserved1 = 0;
				size -= d->bytes;
				copied += d->bytes;
				offset += d->bytes;
				addr += d->bytes;
				next += sizeof(struct sg_dma_descriptor);
				d->next_h = (u32)((u64)next >> 32);
				d->next_l = (u32)next |
					(to_pci ? WRITE_TO_PCI : 0);
				bytes -= d->bytes;
				d++;
				/* PCIe address */
				d->pci_l = addr & 0xffffffff;
				/* If dma_addr_t is 32 bits, then addr >> 32
				 * is actually the equivalent of addr >> 0 in
				 * gcc. So must cast to u64. */
				d->pci_h = (u64)addr >> 32;

				/* Sync to start of streaming frame */
				d->local = 0;
				d->reserved0 = 0;
			}
		}

		d->bytes = bytes;
		d->reserved1 = 0;
		size -= bytes;
		copied += bytes;
		offset += bytes;

		if (copied == copy_bytes) {
			while (copied < stride) {
				bytes = min(sg_dma_len(scatter_list) - offset,
						stride - copied);
				copied += bytes;
				offset += bytes;
				size -= bytes;
				if (sg_dma_len(scatter_list) == offset) {
					offset = 0;
					scatter_list = sg_next(scatter_list);
				}
			}
			copied = 0;
		} else {
			offset = 0;
			scatter_list = sg_next(scatter_list);
		}

		/* Next descriptor + control bits */
		next += sizeof(struct sg_dma_descriptor);
		if (size == 0) {
			/* Loopback to the first descriptor */
			d->next_h = (u32)((u64)desc->bus >> 32);
			d->next_l = (u32)desc->bus |
				(to_pci ? WRITE_TO_PCI : 0) | INTERRUPT_ENABLE;
			if (!to_pci)
				d->local = 0x22222222;
			desc->last_desc_virt = d;
		} else {
			d->next_h = (u32)((u64)next >> 32);
			d->next_l = (u32)next | (to_pci ? WRITE_TO_PCI : 0);
		}
		d++;
	}
	return 0;
}

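/*
 * Link the last descriptor of 'this' chain to the first descriptor of
 * 'next', preserving the transfer direction; a NULL 'next' terminates
 * the chain instead.
 */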
void descriptor_list_chain(struct sg_dma_desc_info *this,
			   struct sg_dma_desc_info *next)
{
	struct sg_dma_descriptor *d = this->last_desc_virt;
	u32 direction = d->next_l & WRITE_TO_PCI;

	if (next == NULL) {
		d->next_h = 0;
		d->next_l = direction | INTERRUPT_ENABLE | END_OF_CHAIN;
	} else {
		d->next_h = (u32)((u64)next->bus >> 32);
		d->next_l = (u32)next->bus | direction | INTERRUPT_ENABLE;
	}
}

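/* Allocate coherent memory for a descriptor list; returns NULL on failure */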
void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes)
{
	desc->size = bytes;
	desc->virt = dma_alloc_coherent(desc->dev, bytes,
					&desc->bus, GFP_KERNEL);
	return desc->virt;
}

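/* Free the descriptor list memory, if any, and clear the virtual pointer */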
void descriptor_list_free(struct sg_dma_desc_info *desc)
{
	if (desc->virt)
		dma_free_coherent(desc->dev, desc->size,
				  desc->virt, desc->bus);
	desc->virt = NULL;
}

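/* Set the interrupt-on-completion flag in the last descriptor */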
void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= INTERRUPT_ENABLE;
}

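/* Clear the interrupt-on-completion flag in the last descriptor */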
void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l &= ~INTERRUPT_ENABLE;
}

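/* Point the last descriptor back at the first, preserving its flag bits */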
void descriptor_list_loopback(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_h = (u32)((u64)desc->bus >> 32);
	d->next_l = (u32)desc->bus | (d->next_l & DESCRIPTOR_FLAG_MSK);
}

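/* Mark the last descriptor as the end of the chain */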
void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= END_OF_CHAIN;
}