/*
 * dim2_hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */

#include "dim2_hal.h"
#include "dim2_errors.h"
#include "dim2_reg.h"
#include <linux/stddef.h>

/*
 * The number of frames per sub-buffer for synchronous channels.
 * Allowed values: 1, 2, 4, 8, 16, 32, 64.
 */
#define FRAMES_PER_SUBBUFF 16

/*
 * Size factor for synchronous DBR buffer.
 * Minimal value is 4*FRAMES_PER_SUBBUFF.
 */
#define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)

/*
 * Size factor for isochronous DBR buffer.
 * Minimal value is 3.
 */
#define ISOC_DBR_FACTOR 3u

/*
 * Number of 32-bit units for DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * Min allocated space is block size.
 * Max possible allocated space is 32 blocks.
 */
#define DBR_MAP_SIZE 2

/* -------------------------------------------------------------------------- */
/* not configurable area */

#define CDT 0x00
#define ADT 0x40
#define MLB_CAT 0x80
#define AHB_CAT 0x88

#define DBR_SIZE  (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
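
/*
 * With DBR_MAP_SIZE == 2 (the current setting) this works out to
 * DBR_BLOCK_SIZE = 16384 / 32 / 2 = 256 bytes per block and a maximum
 * single allocation of 32 blocks = 8K, matching the table above.
 */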

/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */

#define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
#define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)

static inline u32 bit_mask(u8 position)
{
	return (u32)1 << position;
}

static inline bool dim_on_error(u8 error_id, const char *error_message)
{
	DIMCB_OnError(error_id, error_message);
	return false;
}

/* -------------------------------------------------------------------------- */
/* types and local variables */

struct lld_global_vars_t {
	bool dim_is_initialized;
	bool mcm_is_initialized;
	struct dim2_regs *dim2; /* DIM2 core base address */
	u32 dbr_map[DBR_MAP_SIZE];
};

static struct lld_global_vars_t g = { false };

/* -------------------------------------------------------------------------- */

static int dbr_get_mask_size(u16 size)
{
	int i;

	for (i = 0; i < 6; i++)
		if (size <= (DBR_BLOCK_SIZE << i))
			return 1 << i;
	return 0;
}

/**
 * Allocates DBR memory.
 * @param size Size of the memory to allocate.
 * @return Offset in DBR memory on success or DBR_SIZE if out of memory.
 */
static int alloc_dbr(u16 size)
{
	int mask_size;
	int i, block_idx = 0;

	if (size <= 0)
		return DBR_SIZE; /* out of memory */

	mask_size = dbr_get_mask_size(size);
	if (mask_size == 0)
		return DBR_SIZE; /* out of memory */

	for (i = 0; i < DBR_MAP_SIZE; i++) {
		u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
		u32 mask = ~((~(u32)0) << blocks);

		do {
			if ((g.dbr_map[i] & mask) == 0) {
				g.dbr_map[i] |= mask;
				return block_idx * DBR_BLOCK_SIZE;
			}
			block_idx += mask_size;
			/* shift left in two steps in case mask_size == 32 */
			mask <<= mask_size - 1;
		} while ((mask <<= 1) != 0);
	}

	return DBR_SIZE; /* out of memory */
}

static void free_dbr(int offs, int size)
{
	int block_idx = offs / DBR_BLOCK_SIZE;
	u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
	u32 mask = ~((~(u32)0) << blocks);

	mask <<= block_idx % 32;
	g.dbr_map[block_idx / 32] &= ~mask;
}
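
/*
 * Illustrative allocator usage (a sketch only; it mirrors the pattern of
 * the DIM_Init* / DIM_DestroyChannel routines further down).  A return
 * value of DBR_SIZE signals "out of memory":
 *
 *	ch->dbr_addr = alloc_dbr(ch->dbr_size);
 *	if (ch->dbr_addr >= DBR_SIZE)
 *		return DIM_INIT_ERR_OUT_OF_MEMORY;
 *	...
 *	free_dbr(ch->dbr_addr, ch->dbr_size);
 */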

/* -------------------------------------------------------------------------- */

static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
	DIMCB_IoWrite(&g.dim2->MADR, ctr_addr);

	/* wait till transfer is completed */
	while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
		continue;

	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */

	return DIMCB_IoRead((&g.dim2->MDAT0) + mdat_idx);
}

static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
	enum { MADR_WNR_BIT = 31 };

	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */

	if (mask[0] != 0)
		DIMCB_IoWrite(&g.dim2->MDAT0, value[0]);
	if (mask[1] != 0)
		DIMCB_IoWrite(&g.dim2->MDAT1, value[1]);
	if (mask[2] != 0)
		DIMCB_IoWrite(&g.dim2->MDAT2, value[2]);
	if (mask[3] != 0)
		DIMCB_IoWrite(&g.dim2->MDAT3, value[3]);

	DIMCB_IoWrite(&g.dim2->MDWE0, mask[0]);
	DIMCB_IoWrite(&g.dim2->MDWE1, mask[1]);
	DIMCB_IoWrite(&g.dim2->MDWE2, mask[2]);
	DIMCB_IoWrite(&g.dim2->MDWE3, mask[3]);

	DIMCB_IoWrite(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);

	/* wait till transfer is completed */
	while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
		continue;

	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */
}
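
/*
 * Note on CTR access (as implemented by the two routines above): the CTR
 * is reached indirectly.  MADR selects the entry (bit 31 requests a write),
 * MDAT0..MDAT3 carry the data words, MDWE0..MDWE3 act as per-bit
 * write-enable masks, and MCTL bit 0 reports transfer completion.
 */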

static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
{
	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	dim2_write_ctr_mask(ctr_addr, mask, value);
}

static inline void dim2_clear_ctr(u32 ctr_addr)
{
	u32 const value[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ctr_addr, value);
}

static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
			       bool read_not_write, bool sync_mfe)
{
	u16 const cat =
		(read_not_write << CAT_RNW_BIT) |
		(ch_type << CAT_CT_SHIFT) |
		(ch_addr << CAT_CL_SHIFT) |
		(sync_mfe << CAT_MFE_BIT) |
		(false << CAT_MT_BIT) |
		(true << CAT_CE_BIT);
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

	mask[idx] = (u32)0xFFFF << shift;
	value[idx] = cat << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}
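
/*
 * Each CAT entry is 16 bits wide and eight of them share one CTR line,
 * hence the arithmetic above: ch_addr / 8 selects the CTR line, while the
 * remaining low bits pick the 16-bit slot within MDAT0..MDAT3 that gets
 * masked in.
 */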

static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
{
	u8 const ctr_addr = cat_base + ch_addr / 8;
	u8 const idx = (ch_addr % 8) / 2;
	u8 const shift = (ch_addr % 2) * 16;
	u32 mask[4] = { 0, 0, 0, 0 };
	u32 value[4] = { 0, 0, 0, 0 };

	mask[idx] = (u32)0xFFFF << shift;
	dim2_write_ctr_mask(ctr_addr, mask, value);
}

static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
			       u16 packet_length)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	if (packet_length)
		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);

	cdt[3] =
		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
		(dbr_address << CDT3_BA_SHIFT);
	dim2_write_ctr(CDT + ch_addr, cdt);
}

static void dim2_clear_cdt(u8 ch_addr)
{
	u32 cdt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(CDT + ch_addr, cdt);
}

static void dim2_configure_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	adt[0] =
		(true << ADT0_CE_BIT) |
		(true << ADT0_LE_BIT) |
		(0 << ADT0_PG_BIT);

	dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_clear_adt(u8 ch_addr)
{
	u32 adt[4] = { 0, 0, 0, 0 };

	dim2_write_ctr(ADT + ch_addr, adt);
}

static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
				  u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_PS_BIT + shift) |
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_PS_BIT + shift)) |
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
				 u16 buffer_size)
{
	u8 const shift = idx * 16;

	u32 mask[4] = { 0, 0, 0, 0 };
	u32 adt[4] = { 0, 0, 0, 0 };

	mask[1] =
		bit_mask(ADT1_RDY_BIT + shift) |
		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
	adt[1] =
		(true << (ADT1_RDY_BIT + shift)) |
		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));

	mask[idx + 2] = 0xFFFFFFFF;
	adt[idx + 2] = buf_addr;

	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
}

static void dim2_clear_ctram(void)
{
	u32 ctr_addr;

	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
		dim2_clear_ctr(ctr_addr);
}

static void dim2_configure_channel(
	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
	u16 packet_length, bool sync_mfe)
{
	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);

	dim2_configure_adt(ch_addr);
	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);

	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
	DIMCB_IoWrite(&g.dim2->ACMR0,
		      DIMCB_IoRead(&g.dim2->ACMR0) | bit_mask(ch_addr));
}

static void dim2_clear_channel(u8 ch_addr)
{
	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
	DIMCB_IoWrite(&g.dim2->ACMR0,
		      DIMCB_IoRead(&g.dim2->ACMR0) & ~bit_mask(ch_addr));

	dim2_clear_cat(AHB_CAT, ch_addr);
	dim2_clear_adt(ch_addr);

	dim2_clear_cat(MLB_CAT, ch_addr);
	dim2_clear_cdt(ch_addr);
}

/* -------------------------------------------------------------------------- */
/* channel state helpers */

static void state_init(struct int_ch_state *state)
{
	state->request_counter = 0;
	state->service_counter = 0;

	state->idx1 = 0;
	state->idx2 = 0;
	state->level = 0;
}

/* -------------------------------------------------------------------------- */
/* macro helper functions */

static inline bool check_channel_address(u32 ch_address)
{
	return ch_address > 0 && (ch_address % 2) == 0 &&
	       (ch_address / 2) <= (u32)CAT_CL_MASK;
}

static inline bool check_packet_length(u32 packet_length)
{
	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;

	if (packet_length <= 0)
		return false; /* too small */

	if (packet_length > max_size)
		return false; /* too big */

	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
		return false; /* too big */

	return true;
}

static inline bool check_bytes_per_frame(u32 bytes_per_frame)
{
	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) / SYNC_DBR_FACTOR;

	if (bytes_per_frame <= 0)
		return false; /* too small */

	if (bytes_per_frame > max_size)
		return false; /* too big */

	return true;
}

static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
{
	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		return max_size;

	return buf_size;
}

static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / packet_length;

	if (n < 2u)
		return 0; /* too small buffer for given packet_length */

	return packet_length * n;
}
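
/*
 * Example (illustrative values only): with packet_length = 188 and
 * buf_size = 1024, n = 5 and the normalized size is 5 * 188 = 940; any
 * buf_size below 2 * packet_length yields 0.
 */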

static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
{
	u16 n;
	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
	u32 const unit = bytes_per_frame * (u16)FRAMES_PER_SUBBUFF;

	if (buf_size > max_size)
		buf_size = max_size;

	n = buf_size / unit;

	if (n < 1u)
		return 0; /* too small buffer for given bytes_per_frame */

	return unit * n;
}
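
/*
 * Example (illustrative values only): with bytes_per_frame = 4 and
 * FRAMES_PER_SUBBUFF = 16, unit = 64; a requested buf_size of 1000 is
 * rounded down to 15 * 64 = 960, and anything below 64 yields 0.
 */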

static void dim2_cleanup(void)
{
	/* disable MediaLB */
	DIMCB_IoWrite(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);

	dim2_clear_ctram();

	/* disable mlb_int interrupt */
	DIMCB_IoWrite(&g.dim2->MIEN, 0);

	/* clear status for all dma channels */
	DIMCB_IoWrite(&g.dim2->ACSR0, 0xFFFFFFFF);
	DIMCB_IoWrite(&g.dim2->ACSR1, 0xFFFFFFFF);

	/* mask interrupts for all channels */
	DIMCB_IoWrite(&g.dim2->ACMR0, 0);
	DIMCB_IoWrite(&g.dim2->ACMR1, 0);
}

static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
{
	dim2_cleanup();

	/* configure and enable MediaLB */
	DIMCB_IoWrite(&g.dim2->MLBC0,
		      enable_6pin << MLBC0_MLBPEN_BIT |
		      mlb_clock << MLBC0_MLBCLK_SHIFT |
		      MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF) << MLBC0_FCNT_SHIFT |
		      true << MLBC0_MLBEN_BIT);

	/* activate all HBI channels */
	DIMCB_IoWrite(&g.dim2->HCMR0, 0xFFFFFFFF);
	DIMCB_IoWrite(&g.dim2->HCMR1, 0xFFFFFFFF);

	/* enable HBI */
	DIMCB_IoWrite(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));

	/* configure DMA */
	DIMCB_IoWrite(&g.dim2->ACTL,
		      ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
		      true << ACTL_SCE_BIT);
}

static bool dim2_is_mlb_locked(void)
{
	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
			  bit_mask(MLBC1_LOCKERR_BIT);
	u32 const c1 = DIMCB_IoRead(&g.dim2->MLBC1);
	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;

	DIMCB_IoWrite(&g.dim2->MLBC1, c1 & nda_mask);
	return (DIMCB_IoRead(&g.dim2->MLBC1) & mask1) == 0 &&
	       (DIMCB_IoRead(&g.dim2->MLBC0) & mask0) != 0;
}

/* -------------------------------------------------------------------------- */
/* channel help routines */

static inline bool service_channel(u8 ch_addr, u8 idx)
{
	u8 const shift = idx * 16;
	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);

	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
		return false;

	{
		u32 mask[4] = { 0, 0, 0, 0 };
		u32 adt_w[4] = { 0, 0, 0, 0 };

		mask[1] =
			bit_mask(ADT1_DNE_BIT + shift) |
			bit_mask(ADT1_ERR_BIT + shift) |
			bit_mask(ADT1_RDY_BIT + shift);
		dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
	}

	/* clear channel status bit */
	DIMCB_IoWrite(&g.dim2->ACSR0, bit_mask(ch_addr));

	return true;
}

/* -------------------------------------------------------------------------- */
/* channel init routines */

static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = packet_length;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}

static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = bytes_per_frame;
	ch->done_sw_buffers_number = 0;
}

static void channel_init(struct dim_channel *ch, u8 ch_addr)
{
	state_init(&ch->state);

	ch->addr = ch_addr;

	ch->packet_length = 0;
	ch->bytes_per_frame = 0;
	ch->done_sw_buffers_number = 0;
}

/* returns true if channel interrupt state is cleared */
static bool channel_service_interrupt(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (!service_channel(ch->addr, state->idx2))
		return false;

	state->idx2 ^= 1;
	state->request_counter++;
	return true;
}

static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
{
	struct int_ch_state *const state = &ch->state;

	if (buf_size <= 0)
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");

	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
	    buf_size != norm_ctrl_async_buffer_size(buf_size))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad control/async buffer size");

	if (ch->packet_length &&
	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad isochronous buffer size");

	if (ch->bytes_per_frame &&
	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
				    "Bad synchronous buffer size");

	if (state->level >= 2u)
		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");

	++state->level;

	if (ch->packet_length || ch->bytes_per_frame)
		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
	else
		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
				      buf_size);
	state->idx1 ^= 1;

	return true;
}
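
/*
 * The two ADT buffer slots work as a ping-pong pair: state->level counts
 * buffers currently queued in hardware (at most 2), idx1 selects the slot
 * used by the next channel_start(), and idx2 the slot expected to complete
 * next in channel_service_interrupt().
 */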

static u8 channel_service(struct dim_channel *ch)
{
	struct int_ch_state *const state = &ch->state;

	if (state->service_counter != state->request_counter) {
		state->service_counter++;
		if (state->level == 0)
			return DIM_ERR_UNDERFLOW;

		--state->level;
		ch->done_sw_buffers_number++;
	}

	return DIM_NO_ERROR;
}

static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
{
	if (buffers_number > ch->done_sw_buffers_number)
		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");

	ch->done_sw_buffers_number -= buffers_number;
	return true;
}

/* -------------------------------------------------------------------------- */
/* API */

u8 DIM_Startup(void *dim_base_address, u32 mlb_clock)
{
	g.dim_is_initialized = false;

	if (!dim_base_address)
		return DIM_INIT_ERR_DIM_ADDR;

	/* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
	/* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
	if (mlb_clock >= 8)
		return DIM_INIT_ERR_MLB_CLOCK;

	g.dim2 = dim_base_address;
	g.dbr_map[0] = 0;
	g.dbr_map[1] = 0;

	dim2_initialize(mlb_clock >= 3, mlb_clock);

	g.dim_is_initialized = true;

	return DIM_NO_ERROR;
}
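
/*
 * Typical caller-side bring-up (a sketch only; "io_base" and "clock_speed"
 * are placeholder names of the calling layer, not defined in this file):
 *
 *	if (DIM_Startup(io_base, clock_speed) != DIM_NO_ERROR)
 *		goto fail;
 *	DIM_InitControl(&ch, is_tx, ch_address, max_buffer_size);
 *	...
 *	DIM_DestroyChannel(&ch);
 *	DIM_Shutdown();
 */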

void DIM_Shutdown(void)
{
	g.dim_is_initialized = false;
	dim2_cleanup();
}

bool DIM_GetLockState(void)
{
	return dim2_is_mlb_locked();
}

static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
			  u16 ch_address, u16 hw_buffer_size)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	ch->dbr_size = hw_buffer_size;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	channel_init(ch, ch_address / 2);

	dim2_configure_channel(ch->addr, type, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0, false);

	return DIM_NO_ERROR;
}

u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size)
{
	return norm_ctrl_async_buffer_size(buf_size);
}

/**
 * Retrieves the maximal valid buffer size for the isochronous data type that
 * conforms to the given packet length and is not bigger than the given
 * buffer size.
 *
 * Returns a non-zero buffer size on success or zero on error.
 */
u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length)
{
	if (!check_packet_length(packet_length))
		return 0;

	return norm_isoc_buffer_size(buf_size, packet_length);
}

/**
 * Retrieves the maximal valid buffer size for the synchronous data type that
 * conforms to the given bytes per frame and is not bigger than the given
 * buffer size.
 *
 * Returns a non-zero buffer size on success or zero on error.
 */
u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame)
{
	if (!check_bytes_per_frame(bytes_per_frame))
		return 0;

	return norm_sync_buffer_size(buf_size, bytes_per_frame);
}

u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		   u16 max_buffer_size)
{
	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
			       max_buffer_size);
}

u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		 u16 max_buffer_size)
{
	return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
			       max_buffer_size);
}

u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		u16 packet_length)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_packet_length(packet_length))
		return DIM_ERR_BAD_CONFIG;

	ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	isoc_init(ch, ch_address / 2, packet_length);

	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
			       ch->dbr_size, packet_length, false);

	return DIM_NO_ERROR;
}

u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
		u16 bytes_per_frame)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	if (!check_channel_address(ch_address))
		return DIM_INIT_ERR_CHANNEL_ADDRESS;

	if (!check_bytes_per_frame(bytes_per_frame))
		return DIM_ERR_BAD_CONFIG;

	ch->dbr_size = bytes_per_frame * SYNC_DBR_FACTOR;
	ch->dbr_addr = alloc_dbr(ch->dbr_size);
	if (ch->dbr_addr >= DBR_SIZE)
		return DIM_INIT_ERR_OUT_OF_MEMORY;

	sync_init(ch, ch_address / 2, bytes_per_frame);

	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
			       ch->dbr_addr, ch->dbr_size, 0, true);

	return DIM_NO_ERROR;
}

u8 DIM_DestroyChannel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	dim2_clear_channel(ch->addr);
	if (ch->dbr_addr < DBR_SIZE)
		free_dbr(ch->dbr_addr, ch->dbr_size);
	ch->dbr_addr = DBR_SIZE;

	return DIM_NO_ERROR;
}

void DIM_ServiceIrq(struct dim_channel *const *channels)
{
	bool state_changed;

	if (!g.dim_is_initialized) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
			     "DIM is not initialized");
		return;
	}

	if (!channels) {
		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
		return;
	}

	/*
	 * Use a while-loop and a flag to make sure the age is changed back at
	 * least once, otherwise the interrupt may never come if the CPU
	 * generates the interrupt on a change of the age.
	 * This cycle runs no more times than the number of channels, because
	 * the channel_service_interrupt() routine doesn't start the channel
	 * again.
	 */
	do {
		struct dim_channel *const *ch = channels;

		state_changed = false;

		while (*ch) {
			state_changed |= channel_service_interrupt(*ch);
			++ch;
		}
	} while (state_changed);

	/* clear pending Interrupts */
	DIMCB_IoWrite(&g.dim2->MS0, 0);
	DIMCB_IoWrite(&g.dim2->MS1, 0);
}

u8 DIM_ServiceChannel(struct dim_channel *ch)
{
	if (!g.dim_is_initialized || !ch)
		return DIM_ERR_DRIVER_NOT_INITIALIZED;

	return channel_service(ch);
}

struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
					   struct dim_ch_state_t *state_ptr)
{
	if (!ch || !state_ptr)
		return NULL;

	state_ptr->ready = ch->state.level < 2;
	state_ptr->done_buffers = ch->done_sw_buffers_number;

	return state_ptr;
}

bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr, u16 buffer_size)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_start(ch, buffer_addr, buffer_size);
}

bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number)
{
	if (!ch)
		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
				    "Bad channel");

	return channel_detach_buffers(ch, buffers_number);
}
894