/*
 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
 *
 * Copyright (c) STMicroelectronics 2015
 *
 * Author: Peter Bennett <peter.bennett@st.com>
 *         Peter Griffin <peter.griffin@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pinctrl/pinctrl.h>

#include "c8sectpfe-core.h"
#include "c8sectpfe-common.h"
#include "c8sectpfe-debugfs.h"
#include "dmxdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"

#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
MODULE_FIRMWARE(FIRMWARE_MEMDMA);

#define PID_TABLE_SIZE 1024
#define POLL_MSECS 50

static int load_c8sectpfe_fw(struct c8sectpfei *fei);

#define TS_PKT_SIZE 188
#define HEADER_SIZE (4)
#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)

#define FEI_ALIGNMENT (32)
/* hw requires a minimum of 8*PACKET_SIZE, padded to an 8-byte boundary */
#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
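
/*
 * Illustrative arithmetic: PACKET_SIZE = 188 + 4 = 192 bytes, so
 * FEI_BUFFER_SIZE = 8 * 192 * 340 = 522240 bytes, which is both a
 * multiple of 8*PACKET_SIZE (1536) and 8-byte aligned, satisfying
 * the hardware constraint noted above.
 */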

#define FIFO_LEN 1024

static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
{
	struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
	struct channel_info *channel;
	int chan_num;

	/* iterate through input block channels */
	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
		channel = fei->channel_data[chan_num];

		/* is this descriptor initialised and TP enabled */
		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
			tasklet_schedule(&channel->tsklet);
	}

	fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
	add_timer(&fei->timer);
}

static void channel_swdemux_tsklet(unsigned long data)
{
	struct channel_info *channel = (struct channel_info *)data;
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	size = wp - rp;
	num_packets = size / PACKET_SIZE;
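
	/*
	 * Worked example (illustrative): with the buffer at bus address
	 * B, rp = B + 0x300 and wp = B + 0x100, the write pointer has
	 * wrapped. wp is clamped to B + FEI_BUFFER_SIZE, so only the
	 * packets from rp up to the end of the buffer are demuxed on
	 * this pass; the wrapped portion at the start of the buffer is
	 * picked up on the next tasklet run, after rp is reset to B
	 * below.
	 */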

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev, rp, size, DMA_FROM_DEVICE);

	buf = (u8 *) channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
		"rp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}

static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	u32 tmp;
	unsigned long *bitmap;
	int ret;

	switch (dvbdmxfeed->type) {
	case DMX_TYPE_TS:
	case DMX_TYPE_SEC:
		break;
	default:
		dev_err(fei->dev, "%s:%d Error bailing\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	if (dvbdmxfeed->type == DMX_TYPE_TS) {
		switch (dvbdmxfeed->pes_type) {
		case DMX_PES_VIDEO:
		case DMX_PES_AUDIO:
		case DMX_PES_TELETEXT:
		case DMX_PES_PCR:
		case DMX_PES_OTHER:
			break;
		default:
			dev_err(fei->dev, "%s:%d Error bailing\n",
				__func__, __LINE__);
			return -EINVAL;
		}
	}

	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/*
	 * PID 8192 (0x2000) is the DVB wildcard PID: disable hardware
	 * PID filtering so the whole transport stream is captured.
	 */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp &= ~C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
	}
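
	/*
	 * Bit-per-PID table, for illustration: enabling PID 0x100 sets
	 * bit 256 in the 8192-bit (1024-byte) bitmap that the input
	 * block consults for hardware PID filtering.
	 */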

	/* manage cache so PID bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				   channel->pid_buffer_busaddr,
				   PID_TABLE_SIZE,
				   DMA_TO_DEVICE);

	channel->active = 1;

	if (fei->global_feed_count == 0) {
		fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);

		add_timer(&fei->timer);
	}

	if (stdemux->running_feed_count == 0) {

		dev_dbg(fei->dev, "Starting channel=%p\n", channel);

		tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
			     (unsigned long) channel);

		/* Reset the internal inputblock sram pointers */
		writel(channel->fifo,
		       fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
		writel(channel->fifo + FIFO_LEN - 1,
		       fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));

		writel(channel->fifo,
		       fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
		writel(channel->fifo,
		       fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));

		/* reset read / write memdma ptrs for this channel */
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSWP_TP(0));

		/* Issue a reset and enable InputBlock */
		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET,
		       fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* and enable the tp */
		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);

		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n",
			__func__, __LINE__, stdemux);
	}

	stdemux->running_feed_count++;
	fei->global_feed_count++;

	mutex_unlock(&fei->lock);

	return 0;
}

static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* 8192 is the wildcard PID: re-enable hardware PID filtering */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
				   channel->pid_buffer_busaddr,
				   PID_TABLE_SIZE,
				   DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channel's descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						  msecs_to_jiffies(100));

		if (ret == 0)
			dev_warn(fei->dev,
				 "Timeout waiting for idle irq on tsin%d\n",
				 channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
		       channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
		       channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned,
		       0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					   channel->pid_buffer_busaddr,
					   PID_TABLE_SIZE,
					   DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n",
			__func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}

static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
{
	int i;

	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
		if (!fei->channel_data[i])
			continue;

		if (fei->channel_data[i]->tsin_id == tsin_num)
			return fei->channel_data[i];
	}

	return NULL;
}

static void c8sectpfe_getconfig(struct c8sectpfei *fei)
{
	struct c8sectpfe_hw *hw = &fei->hw_stats;

	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);

	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n",
		 hw->num_swts);
	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n",
		 hw->num_tp);
}

static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;
	struct channel_info *chan;
	int bit;
	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);

	/*
	 * Page 168 of the functional spec: clear the idle request
	 * by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register.
	 */

	/* signal idle completion */
	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {

		chan = find_channel(fei, bit);

		if (chan)
			complete(&chan->idle_completion);
	}

	writel(0, fei->io + DMA_IDLE_REQ);

	return IRQ_HANDLED;
}

static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
{
	if (!fei || !tsin)
		return;

	if (tsin->back_buffer_busaddr)
		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
					 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);

	kfree(tsin->back_buffer_start);

	if (tsin->pid_buffer_busaddr)
		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
					 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);

	kfree(tsin->pid_buffer_start);
}

#define MAX_NAME 20

static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
					   struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n",
		__func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT,
					  GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32-byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);
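
	/*
	 * For illustration: if kzalloc() returned 0x...1004, adding
	 * FEI_ALIGNMENT (32) gives 0x...1024, and masking with ~0x1F
	 * rounds down to 0x...1020, a 32-byte aligned address that
	 * still lies within the over-allocated buffer.
	 */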

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
						   (void *)tsin->back_buffer_aligned,
						   FEI_BUFFER_SIZE,
						   DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(2 * PID_TABLE_SIZE, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);
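
	/*
	 * Same trick as the back buffer, for illustration: a start
	 * address of 0x...2100 plus PID_TABLE_SIZE (1024) gives
	 * 0x...2500, and masking with ~0x3ff rounds down to 0x...2400,
	 * which is 1024-byte aligned and within the 2048-byte
	 * allocation.
	 */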

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						  tsin->pid_buffer_aligned,
						  PID_TABLE_SIZE,
						  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				   tsin->pid_buffer_busaddr,
				   PID_TABLE_SIZE,
				   DMA_TO_DEVICE);

	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		 (tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n",
			__func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n",
			__func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	writel(C8SECTPFE_SYNC(0x9) |
	       C8SECTPFE_DROP(0x9) |
	       C8SECTPFE_TOKEN(0x47),
	       fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFOs at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
	       fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
	       fei->io + PIDF_BASE(tsin->tsin_id));

	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		 tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		 &tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
	       | C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
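
	/*
	 * Worked example: a TS packet starts 0x47 <byte1> <byte2> ...,
	 * and the 13-bit PID occupies the low 5 bits of byte 1 plus
	 * all of byte 2. In the big-endian 64-bit word built from the
	 * first 8 bytes, byte 1 is bits 55..48 and byte 2 is bits
	 * 47..40, so the PID sits in bits 52..40. C8SECTPFE_PID_OFFSET(40)
	 * with C8SECTPFE_PID_NUMBITS(13) selects exactly that slice.
	 */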

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base address of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
		readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size, rounded up to an 8-byte boundary */
	writel((TS_PKT_SIZE + 7) & ~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
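
	/*
	 * At this point the ring described by the pointer record is,
	 * illustratively:
	 *
	 *   BUSBASE = B, BUSTOP = B + FEI_BUFFER_SIZE - 1
	 *   BUSWP   = BUSRP = B        (ring empty)
	 *
	 * The TP advances BUSWP as packets arrive and the swdemux
	 * tasklet advances BUSRP as it consumes them.
	 */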

	/* initialize tasklet */
	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
		     (unsigned long) tsin);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}

static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
{
	struct c8sectpfei *fei = priv;

	dev_err(fei->dev, "%s: error handling not yet implemented\n",
		__func__);

	/*
	 * TODO FIXME we should detect some error conditions here
	 * and ideally do something about them!
	 */

	return IRQ_HANDLED;
}

static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0) {
		dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
		return fei->idle_irq;
	}

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0) {
		dev_err(dev, "Can't get c8sectpfe-error-irq\n");
		return fei->error_irq;
	}

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* to save power, disable all IPs (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			       0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
			       c8sectpfe_error_irq_handler, 0,
			       "c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	fei->tsin_count = of_get_child_count(np);

	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
	    fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
							sizeof(struct channel_info),
							GFP_KERNEL);
		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_clk_disable;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_clk_disable;
		}

		/* sanity check value */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number of input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_clk_disable;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							    "invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
								  "serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							     "async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					   &tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_clk_disable;
		}
		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_clk_disable;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_clk_disable;
		}
		of_node_put(i2c_bus);

		tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);

		if (!gpio_is_valid(tsin->rst_gpio)) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_clk_disable;
		}

		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
					    GPIOF_OUT_INIT_LOW, "NIM reset");
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n",
				fei->channel_data[index]->tsin_id);
			goto err_clk_disable;
		}

		if (!ret) {
			/* toggle reset lines */
			gpio_direction_output(tsin->rst_gpio, 0);
			usleep_range(3500, 5000);
			gpio_direction_output(tsin->rst_gpio, 1);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
			"serial-not-parallel=%d async-not-sync=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	init_timer(&fei->timer);
	fei->timer.function = c8sectpfe_timer_interrupt;
	fei->timer.data = (unsigned long)fei;

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
						(void *)fei,
						c8sectpfe_start_feed,
						c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_clk_disable:
	/* TODO uncomment when upstream has taken a reference on this clk */
	/*clk_disable_unprepare(fei->c8sectpfeclk);*/
	return ret;
}

static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* disable clocks to all internal IPs */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	/* TODO uncomment when upstream has taken a reference on this clk */
	/*
	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);
	*/

	return 0;
}

static int configure_channels(struct c8sectpfei *fei)
{
	int index = 0, ret;
	struct channel_info *tsin;
	struct device_node *child, *np = fei->dev->of_node;

	/* iterate round each tsin and configure memdma descriptor and IB hw */
	for_each_child_of_node(np, child) {

		tsin = fei->channel_data[index];

		ret = configure_memdma_and_inputblock(fei,
						      fei->channel_data[index]);
		if (ret) {
			dev_err(fei->dev,
				"configure_memdma_and_inputblock failed\n");
			goto err_unmap;
		}
		index++;
	}

	return 0;

err_unmap:
	for (index = 0; index < fei->tsin_count; index++) {
		tsin = fei->channel_data[index];
		free_input_block(fei, tsin);
	}
	return ret;
}

static int
c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
{
	struct elf32_hdr *ehdr;
	char class;

	if (!fw) {
		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
		return -EINVAL;
	}

	if (fw->size < sizeof(struct elf32_hdr)) {
		dev_err(fei->dev, "Image is too small\n");
		return -EINVAL;
	}

	ehdr = (struct elf32_hdr *)fw->data;

	/* We only support ELF32 at this point */
	class = ehdr->e_ident[EI_CLASS];
	if (class != ELFCLASS32) {
		dev_err(fei->dev, "Unsupported class: %d\n", class);
		return -EINVAL;
	}

	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
		dev_err(fei->dev, "Unsupported firmware endianness\n");
		return -EINVAL;
	}

	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
		dev_err(fei->dev, "Image is too small\n");
		return -EINVAL;
	}

	/* Check ELF magic (covers all four EI_MAGx bytes) */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
		return -EINVAL;
	}

	if (ehdr->e_type != ET_EXEC) {
		dev_err(fei->dev, "Unsupported ELF header type\n");
		return -EINVAL;
	}

	if (ehdr->e_phoff > fw->size) {
		dev_err(fei->dev, "Firmware size is too small\n");
		return -EINVAL;
	}

	return 0;
}

static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			      const struct firmware *fw, u8 __iomem *dest,
			      int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

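	/*
	 * Byte layout, for illustration: source bytes
	 *   s0 s1 s2 s3 s4 s5 ...
	 * are written to IMEM as
	 *   s0 s1 s2 00 s3 s4 s5 00 ...
	 * i.e. a zero pad byte after every third source byte, which is
	 * why the destination is p_memsz + p_memsz/3 bytes long.
	 */
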
	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t"
		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
		phdr->p_paddr, phdr->p_filesz,
		dest, phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/* imem_src points at normal kernel memory (fw->data),
		 * so a plain dereference is sufficient */
		writeb(*imem_src, dest);

		/* every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, dest);
		}

		dest++;
		imem_src++;
	}
}

static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			      const struct firmware *fw, u8 __iomem *dst,
			      int seg_num)
{
	/*
	 * For DMEM segments copy the segment data from the ELF
	 * file and pad segment with zeroes
	 */

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t"
		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
	       phdr->p_filesz);

	memset((void __force *)dst + phdr->p_filesz, 0,
	       phdr->p_memsz - phdr->p_filesz);
}

static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 */
		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
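			/*
			 * Example: p_paddr = 0x40000010 masks to word
			 * offset 0x10, i.e. byte offset 0x40 from the
			 * start of IMEM.
			 */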
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	release_firmware(fw);
	return err;
}

static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
	const struct firmware *fw;
	int err;

	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);

	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
	if (err)
		return err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n",
			err);
		/* load_slim_core_fw() releases fw itself, so do it here too */
		release_firmware(fw);
		return err;
	}

	err = load_slim_core_fw(fw, fei);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		return err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		return err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1, fei->io + DMA_CPU_RUN);

	atomic_set(&fei->fw_loaded, 1);

	return 0;
}

static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	.remove	= c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");