Lines Matching +full:mmci +full:- +full:gpio +full:- +full:supply
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2010 ST-Ericsson SA
 */

#include <linux/mmc/slot-gpio.h>
#include <linux/gpio.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"
/* mmci_card_busy() */
	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		/* ... */
	spin_unlock_irqrestore(&host->lock, flags);
/* mmci_validate_data() */
	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}
/* mmci_reg_delay() */
	if (host->cclk < 25000000)
		/* ... */
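The guard above belongs to the register-write delay helper: slow card
clocks need a real delay between back-to-back register writes, fast ones
do not. A standalone model of that decision; the three-cycle requirement
and the resulting microsecond values are assumptions for illustration,
not taken from this listing.

#include <stdio.h>

static unsigned int reg_delay_us(unsigned int cclk_hz)
{
	/* assume >= 3 card-clock cycles must pass between writes */
	if (cclk_hz < 25000000)
		return (3 * 1000000 + cclk_hz - 1) / cclk_hz; /* ceil, in us */
	return 0;	/* fast clocks: sub-microsecond, busy-wait instead */
}

int main(void)
{
	printf("%u us at 400 kHz\n", reg_delay_us(400000));	/* 8 us */
	return 0;
}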
/* mmci_write_clkreg() - must be called with host->lock held */
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
/* mmci_write_pwrreg() - must be called with host->lock held */
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
/* mmci_write_datactrlreg() - must be called with host->lock held */
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
/* mmci_set_clkreg() - must be called with host->lock held */
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;
	/* ... */
	host->cclk = 0;
	/* ... */
	if (variant->explicit_mclk_control) {
		host->cclk = host->mclk;
	} else if (desired >= host->mclk) {
		/* ... */
		if (variant->st_clkdiv)
			/* ... */
		host->cclk = host->mclk;
	} else if (variant->st_clkdiv) {
		/*
		 * ST variant divider: f = mclk / (clkdiv + 2)
		 * => clkdiv = (mclk / f) - 2
		 * Round the divider up so we never exceed the target rate.
		 */
		clk = DIV_ROUND_UP(host->mclk, desired) - 2;
		/* ... */
		host->cclk = host->mclk / (clk + 2);
	} else {
		/*
		 * Classic divider: f = mclk / (2 * (clkdiv + 1))
		 * => clkdiv = mclk / (2 * f) - 1
		 */
		clk = host->mclk / (2 * desired) - 1;
		/* ... */
		host->cclk = host->mclk / (2 * (clk + 1));
	}

	clk |= variant->clkreg_enable;
	/* ... */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		/* ... */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;
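A standalone check of the two divider formulas above, using made-up
example rates (100 MHz block clock, 25 MHz target):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mclk = 100000000;		/* 100 MHz input clock */
	unsigned int desired = 25000000;	/* 25 MHz target */

	/* ST variant: f = mclk / (clkdiv + 2) */
	unsigned int st = DIV_ROUND_UP(mclk, desired) - 2;
	printf("st: clkdiv=%u -> %u Hz\n", st, mclk / (st + 2));	/* 2 -> 25 MHz */

	/* classic: f = mclk / (2 * (clkdiv + 1)) */
	unsigned int cl = mclk / (2 * desired) - 1;
	printf("classic: clkdiv=%u -> %u Hz\n", cl,
	       mclk / (2 * (cl + 1)));					/* 1 -> 25 MHz */
	return 0;
}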
/* mmci_request_end() */
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
/* mmci_set_mask1() */
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		/* ... */
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
/* mmci_stop_data() */
	host->data = NULL;
/* mmci_init_sg() */
	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
/* mmci_dma_setup() */
	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* If an RX channel was found but no TX channel, reuse it for TX */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	/* ... */
	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	/* ... */
	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (host->ops && host->ops->dma_setup)
		host->ops->dma_setup(host);
/* mmci_dma_release() */
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
/* mmci_dma_data_error() */
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
/* mmci_dma_unmap() */
	if (data->flags & MMC_DATA_READ)
		chan = host->dma_rx_channel;
	else
		chan = host->dma_tx_channel;
	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
/* mmci_dma_finalize() */
	/* Wait up to 1ms for the DMA to complete */
	/* ... */
	status = readl(host->base + MMCISTATUS);
	/* ... */

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBL) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBL) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
/*
 * __mmci_dma_prep_data() - prepares the DMA channel and DMA descriptor,
 * returns non-zero on failure
 */
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		/* ... */
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		/* ... */
	};
	/* ... */
	if (data->flags & MMC_DATA_READ) {
		/* ... */
		chan = host->dma_rx_channel;
	} else {
		/* ... */
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		/* ... */

	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       /* ... */);
	/* ... */

	/* on failure, unmap and bail out */
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
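The maxburst fields above start from a byte threshold: DMA burst lengths
are counted in 32-bit bus words, hence the shift by two. A standalone
illustration, where the fifohalfsize value is a made-up example:

#include <stdio.h>

int main(void)
{
	unsigned int fifohalfsize = 32;	/* half-FIFO threshold in bytes */

	/* 32 bytes -> 8 words of 4 bytes per DMA burst */
	printf("maxburst = %u words\n", fifohalfsize >> 2);
	return 0;
}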
/* mmci_dma_prep_data() */
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
/* mmci_dma_prep_next() */
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
/* mmci_dma_start_data() */
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		/* ... */

	/* ... */

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * ...
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
/* mmci_get_next_data() */
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
/* mmci_pre_request() */
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;
	/* ... */
	BUG_ON(data->host_cookie);
	/* ... */
	data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
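The cookie update above relies on the kernel's wrapping signed overflow
(it is built with -fno-strict-overflow): when the counter wraps negative,
the ternary resets the cookie to 1, so a valid cookie is never 0 (which
means "not prepared") or negative. A standalone model with the wrap made
explicit to stay well-defined in plain C:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int cookie = INT_MAX;

	/* model of ++nd->cookie under wrapping overflow */
	cookie = (cookie == INT_MAX) ? INT_MIN : cookie + 1;
	int host_cookie = cookie < 0 ? 1 : cookie;

	printf("%d\n", host_cookie);	/* prints 1 */
	return 0;
}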
/* mmci_post_request() */
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;
	/* ... */
	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;

		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		if (host->dma_desc_current == next->dma_desc)
			host->dma_desc_current = NULL;

		if (host->dma_current == next->dma_chan) {
			host->dma_in_progress = false;
			host->dma_current = NULL;
		}

		next->dma_desc = NULL;
		next->dma_chan = NULL;
		data->host_cookie = 0;
	}
/* mmci_dma_start_data() - fallback stub when DMA engine support is compiled out */
	return -ENOSYS;
/* mmci_start_data() */
	struct variant_data *variant = host->variant;
	/* ... */
	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;
	/* ... */
	if (data->flags & MMC_DATA_READ) {
		/* ... */
		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			/* ... */
	}
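The data timeout above converts a nanosecond budget into card-clock
cycles. A standalone check of that arithmetic, with made-up example
values:

#include <stdio.h>

int main(void)
{
	unsigned long long timeout_ns = 100000000ULL;	/* 100 ms card timeout */
	unsigned long long cclk = 26000000;		/* 26 MHz card clock */
	unsigned int timeout_clks = 0;			/* extra cycles from the card */

	unsigned long long clks = timeout_ns * cclk;
	clks /= 1000000000ULL;	/* stands in for do_div(clks, NSEC_PER_SEC) */

	printf("MMCIDATATIMER = %u cycles\n",
	       timeout_clks + (unsigned int)clks);	/* 2600000 */
	return 0;
}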
/* mmci_start_command() */
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);
	/* ... */
	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* ... */
	c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
/* mmci_data_irq() */
	/* ... */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns and underruns.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		/* ... */
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
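The rounding above never reports a partial block as transferred. Since
mmci_validate_data() guarantees a power-of-two block size, round_down
reduces to a mask; a standalone check with example numbers:

#include <stdio.h>

int main(void)
{
	unsigned int blksz = 512;	/* power of two, per mmci_validate_data() */
	unsigned int success = 1800;	/* bytes seen on the bus before the error */

	/* round_down(success, blksz) for a power-of-two block size */
	printf("bytes_xfered = %u\n", success & ~(blksz - 1));	/* 1536 */
	return 0;
}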
/* mmci_cmd_irq() */
	void __iomem *base = host->base;
	bool sbc;
	/* ... */
	sbc = (cmd == host->mrq->sbc);
	/* ... */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/* ... */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}
		/* ... */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			/* ... */
			mmci_dma_unmap(host, host->data);
			/* ... */
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
/* mmci_get_rx_fifocnt() */
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);

/* mmci_qcom_get_rx_fifocnt() */
	return host->variant->fifohalfsize;
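MMCIFIFOCNT reports the number of words still expected by the block, so
the shift by two converts it to bytes before subtracting from the bytes
remaining in the request. A standalone check with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int remain = 512;	/* bytes left in the request */
	unsigned int fifocnt = 120;	/* words the block still expects */

	/* bytes currently readable from the FIFO */
	printf("%u bytes readable\n", remain - (fifocnt << 2));	/* 32 */
	return 0;
}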
/* mmci_pio_read() */
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);
		/* ... */
		/*
		 * SDIO may send sizes that are not a multiple of 4 bytes,
		 * so make sure to always fetch the trailing bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		/* ... */
		ptr += count;
		remain -= count;
		host_remain -= count;
		/* ... */
	} while (/* ... */);

	return ptr - buffer;
/* mmci_pio_write() */
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO may want to send something that is not divisible by
		 * 4 (as opposed to card sectors
		 * etc), and the FIFO only accept full 32-bit writes.
		 * So compensate by adding +3 on the count: a single byte
		 * becomes one 32-bit write, 7 bytes become two, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;
		/* ... */
	} while (/* ... */);

	return ptr - buffer;
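A standalone check of the "+3 then >>2" word rounding mentioned in the
comment above: byte counts are rounded up to whole 32-bit FIFO writes.

#include <stdio.h>

int main(void)
{
	unsigned int counts[] = { 1, 4, 7, 16 };

	for (int i = 0; i < 4; i++)	/* prints 1, 1, 2, 4 */
		printf("%u bytes -> %u word writes\n", counts[i],
		       (counts[i] + 3) >> 2);
	return 0;
}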
/* mmci_pio_irq() */
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 */
		/* ... */
		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;
		/* ... */
	} while (/* ... */);
	/* ... */

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
	/* ... */
	if (host->size == 0) {
		/* ... */
	}
/* mmci_irq() */
	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);
			/* ... */
		}
		/* ... */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Don't poll for busy completion in irq context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;
		/* ... */
	} while (status);

	spin_unlock(&host->lock);
/* mmci_request() */
	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
/* mmci_set_ios() */
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	/* ... */
	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		/* ... */

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		/* ... */
		pwr |= variant->pwrreg_powerup;
		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				/* ... */
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/* ... */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, keep the MCI_PWR_ON bit cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				/* ... */);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
/* mmci_get_cd() */
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			/* ... */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
/* mmci_sig_volt_switch() */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    /* ... */);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    /* ... */);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    /* ... */);
			break;
		}
	}
/* mmci_of_parse() */
	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
/* mmci_probe() */
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	/* ... */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	/* ... */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			/* ... */
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  /* ... */);
		if (IS_ERR(host->pins_default)) {
			/* ... */
			ret = PTR_ERR(host->pins_default);
			/* ... */
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    /* ... */);
		if (IS_ERR(host->pins_opendrain)) {
			/* ... */
			ret = PTR_ERR(host->pins_opendrain);
			/* ... */
		}
	}
	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		/* ... */
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		/* ... */

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;
	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			/* ... */
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), /* ... */,
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		/* ... */
	}

	if (variant->init)
		variant->init(host);
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to the
	 * module parameter, which has a (low) default value in case it is
	 * not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
			     min(variant->f_max, mmc->f_max) :
			     min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
			     fmax : min(host->mclk, fmax);
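The f_min divisors above follow from the divider formulas in
mmci_set_clkreg() with an 8-bit divider field saturated at 255 (an
assumption consistent with the 257 and 512 constants): ST variant
mclk/(255 + 2) = mclk/257, classic mclk/(2 * (255 + 1)) = mclk/512.
A standalone check of that arithmetic and of the f_max clamp:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int mclk = 100000000;	/* example clock into the block */

	/* minimum rates with the divider field at its maximum */
	printf("st f_min      = %u\n", mclk / 257);		/* 389105 */
	printf("classic f_min = %u\n", mclk / 512);		/* 195312 */

	/* the requested maximum can never exceed the block clock */
	printf("f_max         = %u\n", min_u(mclk, 208000000));	/* 100000000 */
	return 0;
}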
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
	/* ... */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		/* ... */

	if (!plat->cd_invert)
		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	/* ... */
	mmc->caps |= MMC_CAP_CMD23;
	if (variant->busy_detect) {
		/* ... */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;
	/* ... */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/* ... */
	mmc->max_seg_size = mmc->max_req_size;

	/* Block size can be up to 2048 bytes, but must be a power of two. */
	mmc->max_blk_size = 1 << 11;

	/* ... */
	mmc->max_blk_count = mmc->max_req_size >> 11;
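A standalone check of the size limits above, assuming (for illustration
only) a variant with a 24-bit data length register:

#include <stdio.h>

int main(void)
{
	unsigned int datalength_bits = 24;	/* assumed width */

	unsigned int max_req_size = (1u << datalength_bits) - 1;
	unsigned int max_blk_size = 1u << 11;
	unsigned int max_blk_count = max_req_size >> 11;

	printf("req %u, blk %u, count %u\n",	/* 16777215, 2048, 8191 */
	       max_req_size, max_blk_size, max_blk_count);
	return 0;
}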
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT, or
	 * look up these descriptors named "cd" and "wp" right here, fail
	 * silently of these do not exist
	 */
	/* ... */
	if (ret == -EPROBE_DEFER)
		/* ... */
	else if (gpio_is_valid(plat->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
		/* ... */
	}

	/* ... */
	if (ret == -EPROBE_DEFER)
		/* ... */
	else if (gpio_is_valid(plat->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
		/* ... */
	}
	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       /* ... */, host);
	if (ret)
		/* ... */

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, /* ... */, host);
		if (ret)
			/* ... */
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
	/* ... */
	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

	/* error unwind */
	clk_disable_unprepare(host->clk);
	/* ... */
/* mmci_remove() */
	struct variant_data *variant = host->variant;

	/*
	 * Undo pm_runtime_put() in probe.  We use the _sync
	 * version here so that we can access the primecell.
	 */
	pm_runtime_get_sync(&dev->dev);

	mmc_remove_host(mmc);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0, host->base + MMCICOMMAND);
	writel(0, host->base + MMCIDATACTRL);
	/* ... */
	clk_disable_unprepare(host->clk);
/* mmci_save() */
	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
/* mmci_restore() */
	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
/* mmci_runtime_suspend() */
	clk_disable_unprepare(host->clk);

/* mmci_runtime_resume() */
	clk_prepare_enable(host->clk);