/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from
 * 1. Threads.
 * 2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 * - semaphores;
 * - state flags in the channel object; and
 * - ensuring the IO_Dispatch() routine, which is called from both
 *   CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of the sync_wait_on_event
 * function. This invariant condition is:
 *
 * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
 * and
 * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
 */
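
/*
 * For illustration, the invariant is maintained by re-checking the
 * completion queue under the channel manager lock whenever it changes,
 * as bridge_chnl_get_ioc() does below. A minimal sketch of the pattern
 * (assuming chnl_mgr_lock is already held):
 *
 *	if (!list_empty(&pchnl->io_completions))
 *		sync_set_event(pchnl->sync_event);
 *	else
 *		sync_reset_event(pchnl->sync_event);
 */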

#include <linux/types.h>

/* ----------------------------------- OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include "_tiomap.h"

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- Define for This */
#define USERMODE_ADDR	PAGE_OFFSET

#define MAILBOX_IRQ INT_MAIL_MPU_IRQ

/* ----------------------------------- Function Prototypes */
static int create_chirp_list(struct list_head *list, u32 chirps);

static void free_chirp_list(struct list_head *list);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl);

/*
 * ======== bridge_chnl_add_io_req ========
 * Enqueue an I/O request for data transfer on a channel to the DSP.
 * The direction (mode) is specified in the channel object. Note the DSP
 * address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			   u32 byte_size, u32 buf_size,
			   u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl)
		return -EFAULT;

	if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
		return -EPERM;

	/*
	 * Check the channel state: only queue chirp if channel state
	 * allows it.
	 */
	dw_state = pchnl->state;
	if (dw_state != CHNL_STATEREADY) {
		if (dw_state & CHNL_STATECANCEL)
			return -ECANCELED;
		if ((dw_state & CHNL_STATEEOS) &&
		    CHNL_IS_OUTPUT(pchnl->chnl_mode))
			return -EPIPE;
		/* No other possible states left */
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		return -EFAULT;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL)
			return -ENOMEM;

		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				return -EFAULT;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	chnl_mgr_obj = pchnl->chnl_mgr_obj;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size > io_buf_size(
					pchnl->chnl_mgr_obj->iomgr)) {
				status = -EINVAL;
				goto out;
			}
		}
	}

	/* Get a free chirp: */
	if (list_empty(&pchnl->free_packets_list)) {
		status = -EIO;
		goto out;
	}
	chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);

	/* Enqueue the chirp on the chnl's IORequest queue: */
	chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		host_buf;
	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
		chnl_packet_obj->host_sys_buf = host_sys_buf;

	/*
	 * Note: for dma chans dw_dsp_addr contains dsp address
	 * of SM buffer.
	 */
	/* DSP address */
	chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
	chnl_packet_obj->byte_size = byte_size;
	chnl_packet_obj->buf_size = buf_size;
	/* Only valid for output channel */
	chnl_packet_obj->arg = dw_arg;
	chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
				   CHNL_IOCSTATCOMPLETE);
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
	pchnl->cio_reqs++;
	/*
	 * If end of stream, update the channel state to prevent
	 * more IOR's.
	 */
	if (is_eos)
		pchnl->state |= CHNL_STATEEOS;

	/* Request IO from the DSP */
	io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
			(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
			 IO_OUTPUT), &mb_val);
	sched_dpc = true;
out:
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		sm_interrupt_dsp(dev_ctxt, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->iomgr);

	return status;
}
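
/*
 * Usage sketch (hypothetical caller, not part of this driver): queue a
 * filled kernel buffer on an output channel and let the DPC move it.
 * The buffer, its sizes, and the zero dw_dsp_addr/dw_arg values are
 * illustrative only; error handling is trimmed.
 *
 *	u8 *buf = kzalloc(64, GFP_KERNEL);
 *
 *	status = bridge_chnl_add_io_req(chnl, buf, 64, 64, 0, 0);
 *	if (status)
 *		kfree(buf);
 */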

/*
 * ======== bridge_chnl_cancel_io ========
 * Return all I/O requests to the client which have not yet been
 * transferred. The channel's I/O completion object is
 * signalled, and all the I/O requests are queued as IOC's, with the
 * status field set to CHNL_IOCSTATCANCEL.
 * This call is typically used in abort situations, and is a prelude to
 * chnl_close();
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chirp, *tmp;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (!pchnl || !pchnl->chnl_mgr_obj)
		return -EFAULT;

	chnl_id = pchnl->chnl_id;
	chnl_mode = pchnl->chnl_mode;
	chnl_mgr_obj = pchnl->chnl_mgr_obj;
	/* Mark this channel as cancelled, to prevent further IORequests or
	 * dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	pchnl->state |= CHNL_STATECANCEL;

	if (list_empty(&pchnl->io_requests)) {
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		return 0;
	}

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
		list_del(&chirp->link);
		chirp->byte_size = 0;
		chirp->status |= CHNL_IOCSTATCANCEL;
		list_add_tail(&chirp->link, &pchnl->io_completions);
		pchnl->cio_cs++;
		pchnl->cio_reqs--;
	}

	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

	return 0;
}

/*
 * ======== bridge_chnl_close ========
 * Purpose:
 * Ensures all pending I/O on this channel is cancelled, discards all
 * queued I/O completion notifications, then frees the resources allocated
 * for this channel, and makes the corresponding logical channel id
 * available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl)
		return -EFAULT;
	/* Cancel IO: this ensures no further IO requests or notifications */
	status = bridge_chnl_cancel_io(chnl_obj);
	if (status)
		return status;
	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
	/* Free the slot in the channel manager: */
	pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	pchnl->chnl_mgr_obj->open_channels -= 1;
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	/* Reset channel event: (NOTE: user_event freed in user context) */
	if (pchnl->sync_event) {
		sync_reset_event(pchnl->sync_event);
		kfree(pchnl->sync_event);
		pchnl->sync_event = NULL;
	}
	/* Free I/O request and I/O completion queues: */
	free_chirp_list(&pchnl->io_completions);
	pchnl->cio_cs = 0;

	free_chirp_list(&pchnl->io_requests);
	pchnl->cio_reqs = 0;

	free_chirp_list(&pchnl->free_packets_list);

	/* Release channel object. */
	kfree(pchnl);

	return status;
}

/*
 * ======== bridge_chnl_create ========
 * Create a channel manager object, responsible for opening new channels
 * and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
		       struct dev_object *hdev_obj,
		       const struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
						 * max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->channels) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->output_mask = 0;
			chnl_mgr_obj->last_output = 0;
			chnl_mgr_obj->dev_obj = hdev_obj;
			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (status) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}
	return status;
}

/*
 * ======== bridge_chnl_destroy ========
 * Purpose:
 * Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->channels
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->channels);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_flush_io ========
 * Purpose:
 * Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (pchnl) {
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!list_empty(&pchnl->io_requests) && !status) {
				status = bridge_chnl_get_ioc(chnl_obj,
							     timeout,
							     &chnl_ioc_obj);
				if (status)
					continue;

				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;
			}
		} else {
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->state &= ~CHNL_STATECANCEL;
		}
	}
	return status;
}
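
/*
 * Usage sketch (hypothetical caller): drain an output channel before
 * closing it. A finite timeout (in the units sync_wait_on_event()
 * expects) surfaces a stalled DSP as -ETIMEDOUT instead of blocking
 * forever, which CHNL_IOCINFINITE would.
 *
 *	status = bridge_chnl_flush_io(chnl, 5000);
 *	if (!status)
 *		status = bridge_chnl_close(chnl);
 */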

/*
 * ======== bridge_chnl_get_info ========
 * Purpose:
 * Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			 struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->state = pchnl->state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_get_ioc ========
 * Optionally wait for I/O completion on a channel. Dequeue an I/O
 * completion record, which contains information about the completed
 * I/O request.
 * Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (list_empty(&pchnl->io_completions))
			status = -EREMOTEIO;
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout != CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (list_empty(&pchnl->io_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		chnl_packet_obj = list_first_entry(&pchnl->io_completions,
						   struct chnl_irp, link);
		list_del(&chnl_packet_obj->link);
		/* Update chan_ioc from channel state and chirp: */
		pchnl->cio_cs--;
		/*
		 * If this is a zero-copy channel, then set IOC's pbuf
		 * to the DSP's address. This DSP address will get
		 * translated to user's virtual addr later.
		 */
		host_sys_buf = chnl_packet_obj->host_sys_buf;
		ioc.buf = chnl_packet_obj->host_user_buf;
		ioc.byte_size = chnl_packet_obj->byte_size;
		ioc.buf_size = chnl_packet_obj->buf_size;
		ioc.arg = chnl_packet_obj->arg;
		ioc.status |= chnl_packet_obj->status;
		/* Place the used chirp on the free list: */
		list_add_tail(&chnl_packet_obj->link,
			      &pchnl->free_packets_list);
	} else {
		ioc.buf = NULL;
		ioc.byte_size = 0;
		ioc.arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!list_empty(&pchnl->io_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in user mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (timeout == CHNL_IOCNOWAIT) {
		 *	... ensure event is set..
		 *	sync_set_event(pchnl->sync_event);
		 * } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.buf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.buf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
			else
				status = -EFAULT;
		}
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
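
/*
 * Usage sketch (hypothetical caller): reclaim completed buffers until
 * end of stream. consume() stands in for caller-defined processing of
 * each completed buffer.
 *
 *	struct chnl_ioc ioc;
 *
 *	do {
 *		status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
 *		if (status)
 *			break;
 *		consume(ioc.buf, ioc.byte_size);
 *	} while (!(ioc.status & CHNL_IOCSTATEOS));
 */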

/*
 * ======== bridge_chnl_get_mgr_info ========
 * Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
			     struct chnl_mgrinfo *mgr_info)
{
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (!mgr_info || !hchnl_mgr)
		return -EFAULT;

	if (ch_id > CHNL_MAXCHANNELS)
		return -ECHRNG;

	/* Return the requested information: */
	mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
	mgr_info->open_channels = chnl_mgr_obj->open_channels;
	mgr_info->type = chnl_mgr_obj->type;
	/* total # of chnls */
	mgr_info->max_channels = chnl_mgr_obj->max_channels;

	return 0;
}

/*
 * ======== bridge_chnl_idle ========
 * Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
		     bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->state &= ~CHNL_STATECANCEL;
	}

	return status;
}

/*
 * ======== bridge_chnl_open ========
 * Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
		     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
		     u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	*chnl = NULL;

	/* Validate Args: */
	if (!pattrs->uio_reqs)
		return -EINVAL;

	if (!hchnl_mgr)
		return -EFAULT;

	if (ch_id != CHNL_PICKFREE) {
		if (ch_id >= chnl_mgr_obj->max_channels)
			return -ECHRNG;
		if (chnl_mgr_obj->channels[ch_id] != NULL)
			return -EALREADY;
	} else {
		/* Check for free channel */
		status = search_free_channel(chnl_mgr_obj, &ch_id);
		if (status)
			return status;
	}

	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl)
		return -ENOMEM;

	/* Protect queues from io_dpc: */
	pchnl->state = CHNL_STATECANCEL;

	/* Allocate initial IOR and IOC queues. The IOR and IOC list heads
	 * must be initialized before create_chirp_list() can fail, since
	 * the error path frees all three lists. */
	INIT_LIST_HEAD(&pchnl->io_requests);
	INIT_LIST_HEAD(&pchnl->io_completions);

	status = create_chirp_list(&pchnl->free_packets_list,
				   pattrs->uio_reqs);
	if (status)
		goto out_err;

	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;

	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (!sync_event) {
		status = -ENOMEM;
		goto out_err;
	}
	sync_init_event(sync_event);

	pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (!pchnl->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(pchnl->ntfy_obj);

	/* Initialize CHNL object fields: */
	pchnl->chnl_mgr_obj = chnl_mgr_obj;
	pchnl->chnl_id = ch_id;
	pchnl->chnl_mode = chnl_mode;
	pchnl->user_event = sync_event;
	pchnl->sync_event = sync_event;
	/* Get the process handle */
	pchnl->process = current->tgid;
	pchnl->cb_arg = 0;
	pchnl->bytes_moved = 0;
	/* Default to proc-copy */
	pchnl->chnl_type = CHNL_PCPY;

	/* Insert channel object in channel manager: */
	chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	chnl_mgr_obj->open_channels++;
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	/* Return result... */
	pchnl->state = CHNL_STATEREADY;
	*chnl = pchnl;

	return status;

out_err:
	/* Free memory */
	free_chirp_list(&pchnl->io_completions);
	free_chirp_list(&pchnl->io_requests);
	free_chirp_list(&pchnl->free_packets_list);

	kfree(sync_event);

	if (pchnl->ntfy_obj) {
		ntfy_delete(pchnl->ntfy_obj);
		kfree(pchnl->ntfy_obj);
		pchnl->ntfy_obj = NULL;
	}
	kfree(pchnl);

	return status;
}
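
/*
 * Usage sketch (hypothetical caller): open a channel, letting the
 * manager pick a free id. The mode constant and the chnl_attr layout
 * are assumed from chnldefs.h; only uio_reqs is validated here.
 *
 *	struct chnl_attr attrs = { .uio_reqs = 16 };
 *	struct chnl_object *chnl;
 *
 *	status = bridge_chnl_open(&chnl, hchnl_mgr, CHNL_MODETODSP,
 *				  CHNL_PICKFREE, &attrs);
 */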

/*
 * ======== bridge_chnl_register_notify ========
 * Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				u32 event_mask, u32 notify_type,
				struct dsp_notification *hnotification)
{
	int status = 0;

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}

/*
 * ======== create_chirp_list ========
 * Purpose:
 * Initialize a queue of channel I/O Request/Completion packets.
 * Parameters:
 * list: Pointer to a list_head
 * chirps: Number of Chirps to allocate.
 * Returns:
 * 0 if successful, error code otherwise.
 */
static int create_chirp_list(struct list_head *list, u32 chirps)
{
	struct chnl_irp *chirp;
	u32 i;

	INIT_LIST_HEAD(list);

	/* Make N chirps and place on queue. */
	for (i = 0; i < chirps; i++) {
		chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
		if (!chirp)
			break;
		list_add_tail(&chirp->link, list);
	}

	/* If we couldn't allocate all chirps, free those allocated: */
	if (i != chirps) {
		free_chirp_list(list);
		return -ENOMEM;
	}

	return 0;
}
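
/*
 * Note on chirp lifetime: chirps are only ever allocated here and freed
 * in free_chirp_list(). On the I/O path they just migrate between
 * queues: free_packets_list -> io_requests (bridge_chnl_add_io_req()),
 * io_requests -> io_completions (the I/O DPC, or bridge_chnl_cancel_io()),
 * and io_completions -> free_packets_list (bridge_chnl_get_ioc()).
 */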

/*
 * ======== free_chirp_list ========
 * Purpose:
 * Free the queue of Chirps.
 */
static void free_chirp_list(struct list_head *chirp_list)
{
	struct chnl_irp *chirp, *tmp;

	list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
		list_del(&chirp->link);
		kfree(chirp);
	}
}

/*
 * ======== search_free_channel ========
 * Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->channels[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}