Lines matching "host" in the TMIO MMC DMA helpers, drivers/mmc/host/tmio_mmc_dma.c (context the cross-reference listing omitted is marked with /* ... */):

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (!host->chan_tx || !host->chan_rx)
                return;

        if (host->dma->enable)
                host->dma->enable(host, enable);
}
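
The enable() hook above is how the platform glue gates the controller's DMA
mode. As a point of reference, here is a simplified sketch of such a callback,
loosely modelled on the sh_mobile_sdhi glue driver (the example_* name is made
up, and the real callback also adjusts the data-buffer access width):

static void example_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        /* CTL_DMA_ENABLE is a register offset in this driver family;
         * setting bit 1 switches the data FIFO to DMA handshaking */
        sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
}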

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        tmio_mmc_enable_dma(host, false);

        if (host->chan_rx)
                dmaengine_terminate_all(host->chan_rx);
        if (host->chan_tx)
                dmaengine_terminate_all(host->chan_tx);

        tmio_mmc_enable_dma(host, true);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                /* ... clear "aligned"/"multiple" for offending segments ... */
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;       /* the request cannot be DMAed at all */
        }
        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true; /* too short for DMA: stay on PIO */
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* a single unaligned sg element goes through the bounce buffer */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        /* ... prepare and submit the slave descriptor, setting "desc" ... */
pio:
        if (!desc) {
                /* DMA setup failed: release both channels, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}
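
The "prepare and submit" step elided above follows the stock dmaengine slave
pattern. In the context of tmio_mmc_start_dma_rx() it amounts to roughly the
following; the DMA_CTRL_ACK flag is my assumption of the usual choice, not
something the listing confirms:

        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                                               DMA_DEV_TO_MEM, DMA_CTRL_ACK);
        if (desc) {
                dma_cookie_t cookie = dmaengine_submit(desc);

                if (cookie < 0) {
                        desc = NULL;    /* triggers the pio: fallback above */
                        ret = cookie;
                }
        }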

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                /* ... same alignment screening as in the Rx path ... */
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }
        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* an unaligned element is first copied into the bounce buffer */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        /* ... prepare and submit the slave descriptor, setting "desc" ... */
pio:
        if (!desc) {
                /* DMA setup failed: release both channels, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}
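
The Tx descriptor setup mirrors the Rx sketch earlier, with DMA_TO_DEVICE
mapping and a DMA_MEM_TO_DEV transfer. Note the asymmetry of the bounce buffer:
writes copy into the bounce page up front (above), while reads can only copy
back after the transfer has completed. That read-side counterpart lives in the
core (tmio_mmc_pio.c) and looks roughly like this; a paraphrase, not the
verbatim source:

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
        if (host->sg_ptr == &host->bounce_sg) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

                memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
        }
}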

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}
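
For context, this dispatcher is invoked at the tail of the core's data setup
path (tmio_mmc_start_data() in tmio_mmc_pio.c), after the scatterlist and the
block geometry have been programmed. A rough, simplified sketch of that call
site (example_* is a made-up name; the real function carries extra checks):

static void example_start_data(struct tmio_mmc_host *host,
                               struct mmc_data *data)
{
        tmio_mmc_init_sg(host, data);   /* sets host->sg_ptr / host->sg_len */
        host->data = data;

        /* program block size and count, then try to hand off to DMA */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        tmio_mmc_start_dma(host, data);
}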

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}
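
This tasklet only kicks the engine; the descriptor was already submitted in
tmio_mmc_start_dma_*(). It is scheduled from the core's command-completion
interrupt, which prefers DMA issue over the PIO IRQs whenever a channel is
available. An approximate sketch of that logic (simplified, not verbatim):

        /* in the command-done IRQ path, once a data transfer is pending: */
        if (host->data->flags & MMC_DATA_READ) {
                if (host->force_pio || !host->chan_rx)
                        tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
                else
                        tasklet_schedule(&host->dma_issue);
        } else {
                if (host->force_pio || !host->chan_tx)
                        tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                else
                        tasklet_schedule(&host->dma_issue);
        }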

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}
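
The completion tasklet, in turn, is scheduled from the controller's DATAEND
interrupt rather than from a dmaengine callback. An illustrative sketch of that
hand-off (the example_* name is hypothetical and the real handler checks more
state):

static void example_data_end_irq(struct tmio_mmc_host *host)
{
        bool dma_active;

        if (!host->data)
                return;

        dma_active = (host->data->flags & MMC_DATA_READ) ?
                     !!host->chan_rx : !!host->chan_tx;

        if (dma_active && !host->force_pio)
                tasklet_schedule(&host->dma_complete);  /* unmap + finish */
        else
                tmio_mmc_do_data_irq(host);             /* PIO finishes now */
}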

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* DMA is used for both directions or not at all: without OF bindings,
         * channel cookies for both Tx and Rx must be supplied */
        if (!host->dma || (!host->pdev->dev.of_node &&
            (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
                return;

        if (!host->chan_tx && !host->chan_rx) {
                struct resource *res = platform_get_resource(host->pdev,
                                                             IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = {};
                dma_cap_mask_t mask;
                int ret;

                if (!res)
                        return;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_slave_channel_compat(mask,
                                host->dma->filter, pdata->chan_priv_tx,
                                &host->pdev->dev, "tx");
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);
                if (!host->chan_tx)
                        return;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
                cfg.dst_addr_width = host->dma->dma_buswidth;
                ret = dmaengine_slave_config(host->chan_tx, &cfg);
                if (ret < 0)
                        goto ecfgtx;

                host->chan_rx = dma_request_slave_channel_compat(mask,
                                host->dma->filter, pdata->chan_priv_rx,
                                &host->pdev->dev, "rx");
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);
                if (!host->chan_rx)
                        goto ereqrx;

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
                cfg.src_addr_width = host->dma->dma_buswidth;
                ret = dmaengine_slave_config(host->chan_rx, &cfg);
                if (ret < 0)
                        goto ecfgrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);
        return;

ebouncebuf:
ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}
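
On non-OF platforms the channel lookup above relies on the cookies passed in
through tmio_mmc_data and the filter in tmio_mmc_dma. A hypothetical board-file
sketch of that wiring, using only the fields the function above consumes
(my_filter, my_tx_slave and my_rx_slave are made-up stand-ins for the
platform's real dmaengine filter and slave descriptors):

static struct tmio_mmc_dma my_mmc_dma = {
        .filter       = my_filter,      /* used by dma_request_slave_channel_compat() */
        .dma_buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES,
};

static struct tmio_mmc_data my_mmc_pdata = {
        .chan_priv_tx = &my_tx_slave,   /* filter argument for the "tx" channel */
        .chan_priv_rx = &my_rx_slave,   /* filter argument for the "rx" channel */
};

How host->dma ends up pointing at the tmio_mmc_dma instance is up to the glue
driver's probe routine; on OF platforms the "tx"/"rx" names are resolved from
the device tree instead and the cookies stay unused.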

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;

                host->chan_tx = NULL;   /* clear before releasing */
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;

                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}