Lines matching references to ch (the per-channel state, struct tegra_dma_channel *), apparently from the legacy Tegra APB DMA driver, arch/arm/mach-tegra/dma.c. Each entry gives the source line number, the matching line, the enclosing function, and whether ch is an argument or a local there; the lines in between are elided.
141 static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
143 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
145 static void tegra_dma_stop(struct tegra_dma_channel *ch);
147 void tegra_dma_flush(struct tegra_dma_channel *ch) in tegra_dma_flush() argument
152 void tegra_dma_dequeue(struct tegra_dma_channel *ch) in tegra_dma_dequeue() argument
156 if (tegra_dma_is_empty(ch)) in tegra_dma_dequeue()
159 req = list_entry(ch->list.next, typeof(*req), node); in tegra_dma_dequeue()
161 tegra_dma_dequeue_req(ch, req); in tegra_dma_dequeue()
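Only the ch-bearing lines of tegra_dma_dequeue() appear above (156-161); read together they show a thin head-of-queue helper. A plausible reconstruction of the full body (a sketch, with the elided local declaration assumed):

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        /* Nothing queued, nothing to pull back. */
        if (tegra_dma_is_empty(ch))
                return;

        /* The head of ch->list is the oldest, currently active request;
         * hand it to the heavier per-request path at line 258. */
        req = list_entry(ch->list.next, typeof(*req), node);
        tegra_dma_dequeue_req(ch, req);
}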
165 static void tegra_dma_stop(struct tegra_dma_channel *ch) in tegra_dma_stop() argument
170 csr = readl(ch->addr + APB_DMA_CHAN_CSR); in tegra_dma_stop()
172 writel(csr, ch->addr + APB_DMA_CHAN_CSR); in tegra_dma_stop()
175 writel(csr, ch->addr + APB_DMA_CHAN_CSR); in tegra_dma_stop()
177 status = readl(ch->addr + APB_DMA_CHAN_STA); in tegra_dma_stop()
179 writel(status, ch->addr + APB_DMA_CHAN_STA); in tegra_dma_stop()
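The stop sequence at lines 170-179 reads CSR, writes it back twice, then reads and conditionally writes STA; the bit manipulation between those lines is elided. A sketch assuming the driver's usual CSR_IE_EOC, CSR_ENB and STA_ISE_EOC masks:

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
        u32 csr;
        u32 status;

        /* Mask the channel's end-of-count interrupt first... */
        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_IE_EOC;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        /* ...then disable the channel itself... */
        csr &= ~CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        /* ...and acknowledge any already-latched EOC status so a stale
         * interrupt cannot fire after the channel is reused. */
        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
}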
182 static int tegra_dma_cancel(struct tegra_dma_channel *ch) in tegra_dma_cancel() argument
186 spin_lock_irqsave(&ch->lock, irq_flags); in tegra_dma_cancel()
187 while (!list_empty(&ch->list)) in tegra_dma_cancel()
188 list_del(ch->list.next); in tegra_dma_cancel()
190 tegra_dma_stop(ch); in tegra_dma_cancel()
192 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_cancel()
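tegra_dma_cancel() is nearly complete in the matches alone: under the channel lock it unlinks every queued request, then stops the hardware. Worth noting is what it does not do, which is invoke any completion callback. Filled out with the elided declarations (locals and return value assumed):

static int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);

        /* Unlink everything. list_del() only removes the node; the
         * request objects stay owned by their submitters, and their
         * completion callbacks are never called. */
        while (!list_empty(&ch->list))
                list_del(ch->list.next);

        tegra_dma_stop(ch);

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}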
196 static unsigned int get_channel_status(struct tegra_dma_channel *ch, in get_channel_status() argument
215 status = readl(ch->addr + APB_DMA_CHAN_STA); in get_channel_status()
216 tegra_dma_stop(ch); in get_channel_status()
221 writel(status, ch->addr + APB_DMA_CHAN_STA); in get_channel_status()
225 status = readl(ch->addr + APB_DMA_CHAN_STA); in get_channel_status()
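get_channel_status() samples STA either live (line 225) or after freezing the channel (lines 215-216). The elided code before line 215 presumably quiesces the channel's request source so the word count stops moving before it is read. A condensed sketch of the control flow:

static unsigned int get_channel_status(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req, bool is_stop_dma)
{
        unsigned int status;

        if (is_stop_dma) {
                /* (Elided: quiesce the channel so STA's transfer count
                 * is stable.) Then sample, and stop for real. */
                status = readl(ch->addr + APB_DMA_CHAN_STA);
                tegra_dma_stop(ch);

                /* If an end-of-count latched while stopping, ack it here
                 * so the ISR does not also try to retire this request. */
                if (status & STA_ISE_EOC)
                        writel(status, ch->addr + APB_DMA_CHAN_STA);
        } else {
                status = readl(ch->addr + APB_DMA_CHAN_STA);
        }
        return status;
}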
231 static unsigned int dma_active_count(struct tegra_dma_channel *ch, in dma_active_count() argument
239 req_transfer_count = ch->req_transfer_count + 1; in dma_active_count()
248 if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) { in dma_active_count()
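dma_active_count() turns a raw STA snapshot into bytes. The key facts visible in the matches: ch->req_transfer_count stores the programmed word count minus one (hence the +1 at line 239), and continuous mode (line 248) needs extra correction because the hardware only counts one half-buffer at a time. A sketch of the arithmetic; the STA_* masks and buffer-status constants are assumptions:

static unsigned int dma_active_count(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req, unsigned int status)
{
        unsigned int to_transfer;
        unsigned int req_transfer_count;
        unsigned int bytes_transferred;

        /* Words still outstanding, and words originally programmed
         * (both stored as count-1 by the hardware, hence the +1s). */
        to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
        req_transfer_count = ch->req_transfer_count + 1;

        bytes_transferred = req_transfer_count;
        if (status & STA_BUSY)
                bytes_transferred -= to_transfer;

        /* Continuous mode counts only half the buffer per pass, so add
         * back any half that has already completed. */
        if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
                        bytes_transferred += req_transfer_count;
                if (status & STA_ISE_EOC)
                        bytes_transferred += req_transfer_count;
        }

        return bytes_transferred * 4;   /* 32-bit words to bytes */
}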
258 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch, in tegra_dma_dequeue_req() argument
267 spin_lock_irqsave(&ch->lock, irq_flags); in tegra_dma_dequeue_req()
269 if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req) in tegra_dma_dequeue_req()
272 list_for_each_entry(req, &ch->list, node) { in tegra_dma_dequeue_req()
280 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_dequeue_req()
287 status = get_channel_status(ch, req, true); in tegra_dma_dequeue_req()
288 req->bytes_transferred = dma_active_count(ch, req, status); in tegra_dma_dequeue_req()
290 if (!list_empty(&ch->list)) { in tegra_dma_dequeue_req()
293 next_req = list_entry(ch->list.next, in tegra_dma_dequeue_req()
295 tegra_dma_update_hw(ch, next_req); in tegra_dma_dequeue_req()
301 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_dequeue_req()
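tegra_dma_dequeue_req() is the workhorse removal path. The matches outline it: check whether _req is the hardware-active head (line 269), search and unlink it (line 272), and if it was active, freeze the channel, account the bytes (lines 287-288), and restart on the new head (lines 290-295). A condensed sketch; the error codes and the aborted-status constant are assumptions:

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        struct tegra_dma_req *req = NULL;
        unsigned int status;
        unsigned long irq_flags;
        int stop = 0;
        int found = 0;

        spin_lock_irqsave(&ch->lock, irq_flags);

        /* Only the head request has been programmed into the hardware. */
        if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
                stop = 1;

        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        if (!found) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return -ENOENT;
        }

        if (stop) {
                /* Freeze and sample the channel, then convert the STA
                 * snapshot into a byte count for the caller. */
                status = get_channel_status(ch, req, true);
                req->bytes_transferred = dma_active_count(ch, req, status);

                /* Restart on whatever is now at the head of the queue. */
                if (!list_empty(&ch->list)) {
                        struct tegra_dma_req *next_req;

                        next_req = list_entry(ch->list.next,
                                typeof(*next_req), node);
                        tegra_dma_update_hw(ch, next_req);
                }
        }

        req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}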
309 bool tegra_dma_is_empty(struct tegra_dma_channel *ch) in tegra_dma_is_empty() argument
314 spin_lock_irqsave(&ch->lock, irq_flags); in tegra_dma_is_empty()
315 if (list_empty(&ch->list)) in tegra_dma_is_empty()
319 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_is_empty()
324 bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch, in tegra_dma_is_req_inflight() argument
330 spin_lock_irqsave(&ch->lock, irq_flags); in tegra_dma_is_req_inflight()
331 list_for_each_entry(req, &ch->list, node) { in tegra_dma_is_req_inflight()
333 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_is_req_inflight()
337 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_is_req_inflight()
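The two query helpers are simple lock-protected reads of ch->list. The double unlock in tegra_dma_is_req_inflight() (lines 333 and 337) is the usual early-return-on-match shape:

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *req;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        /* Found: unlock and report (the line 333 path). */
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return false;
}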
342 int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, in tegra_dma_enqueue_req() argument
351 pr_err("Invalid DMA request for channel %d\n", ch->id); in tegra_dma_enqueue_req()
355 spin_lock_irqsave(&ch->lock, irq_flags); in tegra_dma_enqueue_req()
357 list_for_each_entry(_req, &ch->list, node) { in tegra_dma_enqueue_req()
359 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_enqueue_req()
367 if (list_empty(&ch->list)) in tegra_dma_enqueue_req()
370 list_add_tail(&req->node, &ch->list); in tegra_dma_enqueue_req()
373 tegra_dma_update_hw(ch, req); in tegra_dma_enqueue_req()
375 spin_unlock_irqrestore(&ch->lock, irq_flags); in tegra_dma_enqueue_req()
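Enqueueing shows the driver's start-on-idle convention: the hardware is only kicked (line 373) when the list was empty at line 367; otherwise the completion path chains to the new request later. A sketch; the validation guarding the pr_err() at line 351 and the error codes are assumptions:

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *_req;
        int start_dma = 0;

        /* The engine moves 32-bit words, so reject empty or unaligned
         * requests up front (exact checks assumed). */
        if (!req->size || (req->size & 0x3) ||
            (req->source_addr & 0x3) || (req->dest_addr & 0x3)) {
                pr_err("Invalid DMA request for channel %d\n", ch->id);
                return -EINVAL;
        }

        spin_lock_irqsave(&ch->lock, irq_flags);

        /* Refuse to queue the same request object twice. */
        list_for_each_entry(_req, &ch->list, node) {
                if (req == _req) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return -EEXIST;
                }
        }

        req->bytes_transferred = 0;
        req->status = 0;

        /* Start the hardware only if the channel was idle. */
        if (list_empty(&ch->list))
                start_dma = 1;

        list_add_tail(&req->node, &ch->list);

        if (start_dma)
                tegra_dma_update_hw(ch, req);

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}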
384 struct tegra_dma_channel *ch = NULL; in tegra_dma_allocate_channel() local
401 ch = &dma_channels[channel]; in tegra_dma_allocate_channel()
402 ch->mode = mode; in tegra_dma_allocate_channel()
406 return ch; in tegra_dma_allocate_channel()
410 void tegra_dma_free_channel(struct tegra_dma_channel *ch) in tegra_dma_free_channel() argument
412 if (ch->mode & TEGRA_DMA_SHARED) in tegra_dma_free_channel()
414 tegra_dma_cancel(ch); in tegra_dma_free_channel()
416 __clear_bit(ch->id, channel_usage); in tegra_dma_free_channel()
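Allocation and freeing bracket the channel lifetime. The non-atomic __clear_bit on channel_usage at line 416 implies some outer serialization, assumed here to be a module-level mutex; TEGRA_SYSTEM_DMA_CH_MIN as the reserved shared channel is likewise an assumption:

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
        int channel;
        struct tegra_dma_channel *ch = NULL;

        mutex_lock(&tegra_dma_lock);            /* lock name assumed */

        if (mode & TEGRA_DMA_SHARED) {
                /* All TEGRA_DMA_SHARED users share one reserved channel. */
                channel = TEGRA_SYSTEM_DMA_CH_MIN;
        } else {
                channel = find_first_zero_bit(channel_usage,
                        ARRAY_SIZE(dma_channels));
                if (channel >= ARRAY_SIZE(dma_channels))
                        goto out;
        }
        __set_bit(channel, channel_usage);
        ch = &dma_channels[channel];
        ch->mode = mode;

out:
        mutex_unlock(&tegra_dma_lock);
        return ch;
}

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
        /* The shared channel is never torn down by one of its users. */
        if (ch->mode & TEGRA_DMA_SHARED)
                return;

        tegra_dma_cancel(ch);           /* drop queued work, stop hw */
        __clear_bit(ch->id, channel_usage);
}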
421 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, in tegra_dma_update_hw_partial() argument
434 writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); in tegra_dma_update_hw_partial()
435 writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); in tegra_dma_update_hw_partial()
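tegra_dma_update_hw_partial() touches only the two address pointers and deliberately leaves CSR (and its enable bit) alone, which is what lets the continuous path at line 642 chain a new buffer without a stop/start glitch. A sketch; the to_memory/source_addr/dest_addr request fields are assumptions:

static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        u32 apb_ptr;
        u32 ahb_ptr;

        /* One end of the transfer is the APB peripheral FIFO, the
         * other is memory on the AHB; direction decides which. */
        if (req->to_memory) {
                apb_ptr = req->source_addr;
                ahb_ptr = req->dest_addr;
        } else {
                apb_ptr = req->dest_addr;
                ahb_ptr = req->source_addr;
        }

        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;   /* constant assumed */
}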
441 static void tegra_dma_update_hw(struct tegra_dma_channel *ch, in tegra_dma_update_hw() argument
465 if (ch->mode & TEGRA_DMA_MODE_ONESHOT) { in tegra_dma_update_hw()
467 ch->req_transfer_count = (req->size >> 2) - 1; in tegra_dma_update_hw()
474 ch->req_transfer_count = (req->size >> 3) - 1; in tegra_dma_update_hw()
477 csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT; in tegra_dma_update_hw()
536 writel(csr, ch->addr + APB_DMA_CHAN_CSR); in tegra_dma_update_hw()
537 writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); in tegra_dma_update_hw()
538 writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); in tegra_dma_update_hw()
539 writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); in tegra_dma_update_hw()
540 writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); in tegra_dma_update_hw()
543 writel(csr, ch->addr + APB_DMA_CHAN_CSR); in tegra_dma_update_hw()
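The length encoding at lines 467 and 474 is the heart of tegra_dma_update_hw(): WCOUNT holds (32-bit words - 1), so a oneshot request programs its whole size ((size >> 2) - 1) while continuous mode programs only half the buffer ((size >> 3) - 1), which is what produces the ping and pong interrupts at the half and full points. The writes at lines 536-543 then configure everything before enabling. A sketch of that tail (csr and the seq/ptr values are built in the elided lines above it):

        csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

        /* Program the channel completely while it is still disabled... */
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);
        writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        /* ...and flip the enable bit in a final CSR write, so the engine
         * never starts on a half-programmed configuration. */
        csr |= CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;   /* constant assumed */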
548 static void handle_oneshot_dma(struct tegra_dma_channel *ch) in handle_oneshot_dma() argument
553 spin_lock_irqsave(&ch->lock, irq_flags); in handle_oneshot_dma()
554 if (list_empty(&ch->list)) { in handle_oneshot_dma()
555 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_oneshot_dma()
559 req = list_entry(ch->list.next, typeof(*req), node); in handle_oneshot_dma()
563 bytes_transferred = ch->req_transfer_count; in handle_oneshot_dma()
571 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_oneshot_dma()
576 spin_lock_irqsave(&ch->lock, irq_flags); in handle_oneshot_dma()
579 if (!list_empty(&ch->list)) { in handle_oneshot_dma()
580 req = list_entry(ch->list.next, typeof(*req), node); in handle_oneshot_dma()
584 tegra_dma_update_hw(ch, req); in handle_oneshot_dma()
586 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_oneshot_dma()
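The oneshot completion handler shows the driver's locking discipline around client callbacks: the head request is retired under the lock, the lock is dropped for the callback (which may enqueue new work on this same channel, lines 571-576), and then the next request is started. A condensed sketch; the status constant and complete() member are assumptions:

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);
        list_del(&req->node);

        /* req_transfer_count is words-1, so +1 words, *4 for bytes. */
        req->bytes_transferred = (ch->req_transfer_count + 1) << 2;
        req->status = TEGRA_DMA_REQ_SUCCESS;

        /* Drop the lock around the client callback; it may enqueue. */
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        if (req->complete)
                req->complete(req);
        spin_lock_irqsave(&ch->lock, irq_flags);

        /* Chain to the next queued request, if any. */
        if (!list_empty(&ch->list)) {
                req = list_entry(ch->list.next, typeof(*req), node);
                tegra_dma_update_hw(ch, req);
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
}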
589 static void handle_continuous_dma(struct tegra_dma_channel *ch) in handle_continuous_dma() argument
594 spin_lock_irqsave(&ch->lock, irq_flags); in handle_continuous_dma()
595 if (list_empty(&ch->list)) { in handle_continuous_dma()
596 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_continuous_dma()
600 req = list_entry(ch->list.next, typeof(*req), node); in handle_continuous_dma()
604 is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA) in handle_continuous_dma()
612 bytes_transferred = ch->req_transfer_count; in handle_continuous_dma()
618 tegra_dma_stop(ch); in handle_continuous_dma()
620 if (!list_is_last(&req->node, &ch->list)) { in handle_continuous_dma()
625 tegra_dma_update_hw(ch, next_req); in handle_continuous_dma()
631 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_continuous_dma()
637 if (!list_is_last(&req->node, &ch->list)) { in handle_continuous_dma()
642 tegra_dma_update_hw_partial(ch, next_req); in handle_continuous_dma()
647 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_continuous_dma()
658 bytes_transferred = ch->req_transfer_count; in handle_continuous_dma()
668 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_continuous_dma()
676 spin_unlock_irqrestore(&ch->lock, irq_flags); in handle_continuous_dma()
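Continuous mode is the most intricate path. The buffer is double-buffered in place: an interrupt fires at the half-way (ping) point and again at the end (pong). On the ping interrupt the next request's pointers are patched in via tegra_dma_update_hw_partial() (line 642) so the engine chains without stopping; the STA_PING_PONG probe at line 604 detects the late case where the whole buffer already finished, recovered by a full stop/reprogram (lines 618-625). A condensed sketch; the buffer-status constants and the threshold()/complete() callbacks are assumptions:

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);

        if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
                /* Ping interrupt: first half done. Check whether the
                 * engine has already wrapped into the second half. */
                bool is_dma_ping_complete;

                is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
                                        & STA_PING_PONG) ? true : false;
                if (is_dma_ping_complete) {
                        /* Serviced too late: the buffer is already full.
                         * Stop, fully reprogram the next request, and
                         * retire this one. */
                        tegra_dma_stop(ch);
                        if (!list_is_last(&req->node, &ch->list)) {
                                struct tegra_dma_req *next_req;

                                next_req = list_entry(req->node.next,
                                        typeof(*next_req), node);
                                tegra_dma_update_hw(ch, next_req);
                        }
                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                        list_del(&req->node);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        req->complete(req);
                        return;
                }

                /* On time: mark the first half consumed and patch the
                 * next request's pointers in without stopping. */
                req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
                if (!list_is_last(&req->node, &ch->list)) {
                        struct tegra_dma_req *next_req;

                        next_req = list_entry(req->node.next,
                                typeof(*next_req), node);
                        tegra_dma_update_hw_partial(ch, next_req);
                }
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                if (req->threshold)
                        req->threshold(req);
                return;
        }

        /* Pong interrupt: the head request's buffer is now full. Words
         * per half is req_transfer_count + 1, so bytes = that * 4 * 2. */
        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
        req->bytes_transferred = (ch->req_transfer_count + 1) << 3;
        req->status = TEGRA_DMA_REQ_SUCCESS;
        list_del(&req->node);
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        req->complete(req);
}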
681 struct tegra_dma_channel *ch = data; in dma_isr() local
684 status = readl(ch->addr + APB_DMA_CHAN_STA); in dma_isr()
686 writel(status, ch->addr + APB_DMA_CHAN_STA); in dma_isr()
688 pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id); in dma_isr()
696 struct tegra_dma_channel *ch = data; in dma_thread_fn() local
698 if (ch->mode & TEGRA_DMA_MODE_ONESHOT) in dma_thread_fn()
699 handle_oneshot_dma(ch); in dma_thread_fn()
701 handle_continuous_dma(ch); in dma_thread_fn()
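dma_isr() and dma_thread_fn() are the two halves of a request_threaded_irq() pair: the hard handler only acknowledges the hardware and returns IRQ_WAKE_THREAD, deferring list manipulation and client callbacks to the thread, which dispatches on the channel mode. Reconstructed around the spurious-interrupt branch visible at line 688:

static irqreturn_t dma_isr(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;
        unsigned long status;

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC) {
                /* Ack the end-of-count so it cannot re-fire. */
                writel(status, ch->addr + APB_DMA_CHAN_STA);
        } else {
                pr_warning("Got a spurious ISR for DMA channel %d\n",
                        ch->id);
                return IRQ_HANDLED;
        }
        return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;

        if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
                handle_oneshot_dma(ch);
        else
                handle_continuous_dma(ch);

        return IRQ_HANDLED;
}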
736 struct tegra_dma_channel *ch = &dma_channels[i]; in tegra_dma_init() local
738 ch->id = i; in tegra_dma_init()
739 snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); in tegra_dma_init()
741 ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE + in tegra_dma_init()
744 spin_lock_init(&ch->lock); in tegra_dma_init()
745 INIT_LIST_HEAD(&ch->list); in tegra_dma_init()
749 dma_channels[i].name, ch); in tegra_dma_init()
755 ch->irq = irq; in tegra_dma_init()
768 struct tegra_dma_channel *ch = &dma_channels[i]; in tegra_dma_init() local
769 if (ch->irq) in tegra_dma_init()
770 free_irq(ch->irq, ch); in tegra_dma_init()
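Finally, tegra_dma_init() sets up each channel and wires the two handlers together with request_threaded_irq(); on failure it falls through to the cleanup loop at lines 768-770, which frees only the IRQs actually claimed (ch->irq is recorded at line 755 only after success). A sketch of the per-channel loop; the IRQ base and register-stride constants are assumptions:

        for (i = 0; i < ARRAY_SIZE(dma_channels); i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];

                ch->id = i;
                snprintf(ch->name, TEGRA_DMA_NAME_SIZE,
                        "dma_channel_%d", i);

                /* Channel register blocks sit at a fixed stride above
                 * channel 0 (stride constant assumed). */
                ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CHANNEL_REGISTER_SIZE * i);

                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->list);

                irq = INT_APB_DMA_CH0 + i;      /* IRQ base assumed */
                ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
                        dma_channels[i].name, ch);
                if (ret) {
                        pr_err("Failed to register IRQ %d for DMA %d\n",
                                irq, i);
                        goto fail;
                }
                ch->irq = irq;  /* set only on success; see cleanup above */
        }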