
Lines matching references to "host" in the Linux MMCIF MMC host driver (drivers/mmc/host/sh_mmcif.c). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark matches that are a function parameter or a local variable.

269 #define sh_mmcif_host_to_dev(host) (&host->pd->dev)  argument
271 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, in sh_mmcif_bitset() argument
274 writel(val | readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitset()
277 static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, in sh_mmcif_bitclr() argument
280 writel(~val & readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitclr()
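The two helpers above are plain read-modify-write accessors over the controller's memory-mapped registers. A minimal sketch of the same pattern with explanatory comments (host->addr is assumed to be the ioremap()ed base of the MMCIF register block, as the surrounding code suggests):

#include <linux/io.h>

/* Set bits: OR the requested mask into the current register contents. */
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

/* Clear bits: AND with the complement of the requested mask. */
static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

Since the read-modify-write is not atomic, callers need to avoid racing updates to the same register.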
285 struct sh_mmcif_host *host = arg; in sh_mmcif_dma_complete() local
286 struct mmc_request *mrq = host->mrq; in sh_mmcif_dma_complete()
287 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_dma_complete()
295 complete(&host->dma_complete); in sh_mmcif_dma_complete()
298 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) in sh_mmcif_start_dma_rx() argument
300 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_rx()
303 struct dma_chan *chan = host->chan_rx; in sh_mmcif_start_dma_rx()
304 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_start_dma_rx()
311 host->dma_active = true; in sh_mmcif_start_dma_rx()
318 desc->callback_param = host; in sh_mmcif_start_dma_rx()
320 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); in sh_mmcif_start_dma_rx()
330 host->chan_rx = NULL; in sh_mmcif_start_dma_rx()
331 host->dma_active = false; in sh_mmcif_start_dma_rx()
334 chan = host->chan_tx; in sh_mmcif_start_dma_rx()
336 host->chan_tx = NULL; in sh_mmcif_start_dma_rx()
341 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_start_dma_rx()
348 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) in sh_mmcif_start_dma_tx() argument
350 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_tx()
353 struct dma_chan *chan = host->chan_tx; in sh_mmcif_start_dma_tx()
354 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_start_dma_tx()
361 host->dma_active = true; in sh_mmcif_start_dma_tx()
368 desc->callback_param = host; in sh_mmcif_start_dma_tx()
370 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); in sh_mmcif_start_dma_tx()
380 host->chan_tx = NULL; in sh_mmcif_start_dma_tx()
381 host->dma_active = false; in sh_mmcif_start_dma_tx()
384 chan = host->chan_rx; in sh_mmcif_start_dma_tx()
386 host->chan_rx = NULL; in sh_mmcif_start_dma_tx()
391 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_start_dma_tx()
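sh_mmcif_start_dma_rx() and sh_mmcif_start_dma_tx() above wrap the standard dmaengine slave API: map the request's scatterlist, build a slave descriptor, attach sh_mmcif_dma_complete() as the completion callback, submit, and enable the matching BUF_ACC DMA bit. A minimal sketch of that submission sequence (mmcif_submit_dma is a hypothetical helper for illustration, not the driver's exact code; error cleanup and the BUF_ACC write are omitted):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int mmcif_submit_dma(struct sh_mmcif_host *host, struct dma_chan *chan,
			    struct mmc_data *data,
			    enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;
	int nents;

	/* Map the MMC request's scatterlist for the DMA controller. */
	nents = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
			   dir == DMA_MEM_TO_DEV ? DMA_TO_DEVICE
						 : DMA_FROM_DEVICE);
	if (nents <= 0)
		return -EIO;

	/* Build a slave scatter-gather descriptor for this channel. */
	desc = dmaengine_prep_slave_sg(chan, data->sg, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	/* Completion callback, as in the listing (lines 285-295). */
	desc->callback = sh_mmcif_dma_complete;
	desc->callback_param = host;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */

	host->dma_active = true;
	return 0;
}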
399 sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id) in sh_mmcif_request_dma_pdata() argument
411 static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, in sh_mmcif_dma_slave_config() argument
418 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); in sh_mmcif_dma_slave_config()
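sh_mmcif_dma_slave_config() looks up the controller's MMIO resource so the DMA engine can be pointed at the data FIFO. A rough sketch of what such a configuration typically looks like (mmcif_dma_slave_config is a hypothetical illustration; using MMCIF_CE_DATA as the FIFO offset and 32-bit bus widths are assumptions, not confirmed by the listing):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int mmcif_dma_slave_config(struct sh_mmcif_host *host,
				  struct dma_chan *chan,
				  enum dma_transfer_direction dir)
{
	struct dma_slave_config cfg = { .direction = dir };
	struct resource *res;

	/* Physical base of the MMCIF register block (line 418). */
	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}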
432 static void sh_mmcif_request_dma(struct sh_mmcif_host *host) in sh_mmcif_request_dma() argument
434 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_request_dma()
435 host->dma_active = false; in sh_mmcif_request_dma()
441 host->chan_tx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
443 host->chan_rx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
446 host->chan_tx = dma_request_slave_channel(dev, "tx"); in sh_mmcif_request_dma()
447 host->chan_rx = dma_request_slave_channel(dev, "rx"); in sh_mmcif_request_dma()
449 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, in sh_mmcif_request_dma()
450 host->chan_rx); in sh_mmcif_request_dma()
452 if (!host->chan_tx || !host->chan_rx || in sh_mmcif_request_dma()
453 sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) || in sh_mmcif_request_dma()
454 sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM)) in sh_mmcif_request_dma()
460 if (host->chan_tx) in sh_mmcif_request_dma()
461 dma_release_channel(host->chan_tx); in sh_mmcif_request_dma()
462 if (host->chan_rx) in sh_mmcif_request_dma()
463 dma_release_channel(host->chan_rx); in sh_mmcif_request_dma()
464 host->chan_tx = host->chan_rx = NULL; in sh_mmcif_request_dma()
467 static void sh_mmcif_release_dma(struct sh_mmcif_host *host) in sh_mmcif_release_dma() argument
469 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); in sh_mmcif_release_dma()
471 if (host->chan_tx) { in sh_mmcif_release_dma()
472 struct dma_chan *chan = host->chan_tx; in sh_mmcif_release_dma()
473 host->chan_tx = NULL; in sh_mmcif_release_dma()
476 if (host->chan_rx) { in sh_mmcif_release_dma()
477 struct dma_chan *chan = host->chan_rx; in sh_mmcif_release_dma()
478 host->chan_rx = NULL; in sh_mmcif_release_dma()
482 host->dma_active = false; in sh_mmcif_release_dma()
485 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) in sh_mmcif_clock_control() argument
487 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_clock_control()
490 unsigned int current_clk = clk_get_rate(host->clk); in sh_mmcif_clock_control()
493 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); in sh_mmcif_clock_control()
494 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); in sh_mmcif_clock_control()
499 if (host->clkdiv_map) { in sh_mmcif_clock_control()
507 if (!((1 << i) & host->clkdiv_map)) in sh_mmcif_clock_control()
516 freq = clk_round_rate(host->clk, clk * div); in sh_mmcif_clock_control()
531 clk_set_rate(host->clk, best_freq); in sh_mmcif_clock_control()
539 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv); in sh_mmcif_clock_control()
540 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); in sh_mmcif_clock_control()
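sh_mmcif_clock_control() first gates the card clock (clearing CLK_ENABLE and the divider field), then, when a non-zero rate is requested, searches for the best parent-rate/divider combination before programming CLK_CLEAR and re-enabling the clock. A rough, illustrative sketch of that search (mmcif_pick_best_rate is hypothetical; the per-bit power-of-two divider encoding of clkdiv_map and its ten-bit width are assumptions inferred from lines 507 and 1017):

#include <linux/clk.h>

static void mmcif_pick_best_rate(struct sh_mmcif_host *host, unsigned int clk)
{
	unsigned int i, div, best_div = 1;
	long freq, best_freq = 0;

	for (i = 0; i < 10; i++) {
		if (!((1 << i) & host->clkdiv_map))
			continue;		/* divider not usable here */

		div = 1 << (i + 1);		/* assumed /2^(i+1) divider */
		freq = clk_round_rate(host->clk, clk * div);
		if (freq <= 0)
			continue;

		/* Keep the candidate closest to, but not above, the target. */
		if (freq / div <= clk && freq / div > best_freq / best_div) {
			best_freq = freq;
			best_div = div;
		}
	}

	if (best_freq)
		clk_set_rate(host->clk, best_freq);
}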
543 static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) in sh_mmcif_sync_reset() argument
547 tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); in sh_mmcif_sync_reset()
549 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); in sh_mmcif_sync_reset()
550 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); in sh_mmcif_sync_reset()
551 if (host->ccs_enable) in sh_mmcif_sync_reset()
553 if (host->clk_ctrl2_enable) in sh_mmcif_sync_reset()
554 sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); in sh_mmcif_sync_reset()
555 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | in sh_mmcif_sync_reset()
558 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); in sh_mmcif_sync_reset()
561 static int sh_mmcif_error_manage(struct sh_mmcif_host *host) in sh_mmcif_error_manage() argument
563 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_error_manage()
567 host->sd_error = false; in sh_mmcif_error_manage()
569 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); in sh_mmcif_error_manage()
570 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); in sh_mmcif_error_manage()
575 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); in sh_mmcif_error_manage()
576 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); in sh_mmcif_error_manage()
578 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) in sh_mmcif_error_manage()
588 sh_mmcif_sync_reset(host); in sh_mmcif_error_manage()
595 host->state, host->wait_for); in sh_mmcif_error_manage()
599 host->state, host->wait_for); in sh_mmcif_error_manage()
603 host->state, host->wait_for); in sh_mmcif_error_manage()
609 static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) in sh_mmcif_next_block() argument
611 struct mmc_data *data = host->mrq->data; in sh_mmcif_next_block()
613 host->sg_blkidx += host->blocksize; in sh_mmcif_next_block()
616 BUG_ON(host->sg_blkidx > data->sg->length); in sh_mmcif_next_block()
618 if (host->sg_blkidx == data->sg->length) { in sh_mmcif_next_block()
619 host->sg_blkidx = 0; in sh_mmcif_next_block()
620 if (++host->sg_idx < data->sg_len) in sh_mmcif_next_block()
621 host->pio_ptr = sg_virt(++data->sg); in sh_mmcif_next_block()
623 host->pio_ptr = p; in sh_mmcif_next_block()
626 return host->sg_idx != data->sg_len; in sh_mmcif_next_block()
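sh_mmcif_next_block() advances the PIO position after each block: it bumps the offset within the current scatterlist entry and, when that entry is exhausted, moves host->pio_ptr to the next entry; the return value tells the caller whether more data remains. Reassembled from the matched lines above, with the elided else-branch brace and declarations filled in as assumptions:

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	/* One more block consumed from the current scatterlist entry. */
	host->sg_blkidx += host->blocksize;

	/* Each sg entry is expected to hold a whole number of blocks. */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		/* Entry exhausted: start at the beginning of the next one. */
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		/* Stay in the same entry, just remember the new position. */
		host->pio_ptr = p;
	}

	/* True while there are still scatterlist entries left to transfer. */
	return host->sg_idx != data->sg_len;
}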
629 static void sh_mmcif_single_read(struct sh_mmcif_host *host, in sh_mmcif_single_read() argument
632 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_read()
635 host->wait_for = MMCIF_WAIT_FOR_READ; in sh_mmcif_single_read()
638 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_single_read()
641 static bool sh_mmcif_read_block(struct sh_mmcif_host *host) in sh_mmcif_read_block() argument
643 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_read_block()
644 struct mmc_data *data = host->mrq->data; in sh_mmcif_read_block()
648 if (host->sd_error) { in sh_mmcif_read_block()
649 data->error = sh_mmcif_error_manage(host); in sh_mmcif_read_block()
654 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_read_block()
655 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_read_block()
658 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); in sh_mmcif_read_block()
659 host->wait_for = MMCIF_WAIT_FOR_READ_END; in sh_mmcif_read_block()
664 static void sh_mmcif_multi_read(struct sh_mmcif_host *host, in sh_mmcif_multi_read() argument
672 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_read()
675 host->wait_for = MMCIF_WAIT_FOR_MREAD; in sh_mmcif_multi_read()
676 host->sg_idx = 0; in sh_mmcif_multi_read()
677 host->sg_blkidx = 0; in sh_mmcif_multi_read()
678 host->pio_ptr = sg_virt(data->sg); in sh_mmcif_multi_read()
680 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_multi_read()
683 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) in sh_mmcif_mread_block() argument
685 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_mread_block()
686 struct mmc_data *data = host->mrq->data; in sh_mmcif_mread_block()
687 u32 *p = host->pio_ptr; in sh_mmcif_mread_block()
690 if (host->sd_error) { in sh_mmcif_mread_block()
691 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mread_block()
698 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mread_block()
699 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_mread_block()
701 if (!sh_mmcif_next_block(host, p)) in sh_mmcif_mread_block()
704 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); in sh_mmcif_mread_block()
709 static void sh_mmcif_single_write(struct sh_mmcif_host *host, in sh_mmcif_single_write() argument
712 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_write()
715 host->wait_for = MMCIF_WAIT_FOR_WRITE; in sh_mmcif_single_write()
718 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_single_write()
721 static bool sh_mmcif_write_block(struct sh_mmcif_host *host) in sh_mmcif_write_block() argument
723 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_write_block()
724 struct mmc_data *data = host->mrq->data; in sh_mmcif_write_block()
728 if (host->sd_error) { in sh_mmcif_write_block()
729 data->error = sh_mmcif_error_manage(host); in sh_mmcif_write_block()
734 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_write_block()
735 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_write_block()
738 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); in sh_mmcif_write_block()
739 host->wait_for = MMCIF_WAIT_FOR_WRITE_END; in sh_mmcif_write_block()
744 static void sh_mmcif_multi_write(struct sh_mmcif_host *host, in sh_mmcif_multi_write() argument
752 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_write()
755 host->wait_for = MMCIF_WAIT_FOR_MWRITE; in sh_mmcif_multi_write()
756 host->sg_idx = 0; in sh_mmcif_multi_write()
757 host->sg_blkidx = 0; in sh_mmcif_multi_write()
758 host->pio_ptr = sg_virt(data->sg); in sh_mmcif_multi_write()
760 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_multi_write()
763 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) in sh_mmcif_mwrite_block() argument
765 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_mwrite_block()
766 struct mmc_data *data = host->mrq->data; in sh_mmcif_mwrite_block()
767 u32 *p = host->pio_ptr; in sh_mmcif_mwrite_block()
770 if (host->sd_error) { in sh_mmcif_mwrite_block()
771 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mwrite_block()
778 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mwrite_block()
779 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_mwrite_block()
781 if (!sh_mmcif_next_block(host, p)) in sh_mmcif_mwrite_block()
784 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); in sh_mmcif_mwrite_block()
789 static void sh_mmcif_get_response(struct sh_mmcif_host *host, in sh_mmcif_get_response() argument
793 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); in sh_mmcif_get_response()
794 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); in sh_mmcif_get_response()
795 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); in sh_mmcif_get_response()
796 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
798 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
801 static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, in sh_mmcif_get_cmd12response() argument
804 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); in sh_mmcif_get_cmd12response()
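The response helpers read back the command response registers: long (136-bit, R2) responses are collected from RESP3 down to RESP0, shorter responses fit in RESP0 alone, and the auto-CMD12 response has its own register. A sketch with the response-type check filled in (mmcif_get_response is illustrative; MMC_RSP_136 is the standard MMC core flag and its use here is an assumption, since the listing elides the condition around line 792):

#include <linux/mmc/core.h>

static void mmcif_get_response(struct sh_mmcif_host *host,
			       struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response spans four registers, MSW first. */
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else {
		/* 48-bit responses fit in a single register. */
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	}
}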
807 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, in sh_mmcif_set_cmd() argument
810 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_set_cmd()
839 switch (host->bus_width) { in sh_mmcif_set_cmd()
853 switch (host->timing) { in sh_mmcif_set_cmd()
872 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, in sh_mmcif_set_cmd()
890 static int sh_mmcif_data_trans(struct sh_mmcif_host *host, in sh_mmcif_data_trans() argument
893 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_data_trans()
897 sh_mmcif_multi_read(host, mrq); in sh_mmcif_data_trans()
900 sh_mmcif_multi_write(host, mrq); in sh_mmcif_data_trans()
903 sh_mmcif_single_write(host, mrq); in sh_mmcif_data_trans()
907 sh_mmcif_single_read(host, mrq); in sh_mmcif_data_trans()
915 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, in sh_mmcif_start_cmd() argument
928 if (host->ccs_enable) in sh_mmcif_start_cmd()
932 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); in sh_mmcif_start_cmd()
933 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, in sh_mmcif_start_cmd()
936 opc = sh_mmcif_set_cmd(host, mrq); in sh_mmcif_start_cmd()
938 if (host->ccs_enable) in sh_mmcif_start_cmd()
939 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); in sh_mmcif_start_cmd()
941 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); in sh_mmcif_start_cmd()
942 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); in sh_mmcif_start_cmd()
944 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); in sh_mmcif_start_cmd()
946 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_start_cmd()
947 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); in sh_mmcif_start_cmd()
949 host->wait_for = MMCIF_WAIT_FOR_CMD; in sh_mmcif_start_cmd()
950 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_start_cmd()
951 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_start_cmd()
954 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, in sh_mmcif_stop_cmd() argument
957 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_stop_cmd()
961 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); in sh_mmcif_stop_cmd()
964 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); in sh_mmcif_stop_cmd()
968 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_stop_cmd()
972 host->wait_for = MMCIF_WAIT_FOR_STOP; in sh_mmcif_stop_cmd()
977 struct sh_mmcif_host *host = mmc_priv(mmc); in sh_mmcif_request() local
978 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_request()
981 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_request()
982 if (host->state != STATE_IDLE) { in sh_mmcif_request()
984 __func__, host->state); in sh_mmcif_request()
985 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
991 host->state = STATE_REQUEST; in sh_mmcif_request()
992 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
994 host->mrq = mrq; in sh_mmcif_request()
996 sh_mmcif_start_cmd(host, mrq); in sh_mmcif_request()
999 static void sh_mmcif_clk_setup(struct sh_mmcif_host *host) in sh_mmcif_clk_setup() argument
1001 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_clk_setup()
1003 if (host->mmc->f_max) { in sh_mmcif_clk_setup()
1006 f_max = host->mmc->f_max; in sh_mmcif_clk_setup()
1008 f_min = clk_round_rate(host->clk, f_min_old / 2); in sh_mmcif_clk_setup()
1017 host->clkdiv_map = 0x3ff; in sh_mmcif_clk_setup()
1019 host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map)); in sh_mmcif_clk_setup()
1020 host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map)); in sh_mmcif_clk_setup()
1022 unsigned int clk = clk_get_rate(host->clk); in sh_mmcif_clk_setup()
1024 host->mmc->f_max = clk / 2; in sh_mmcif_clk_setup()
1025 host->mmc->f_min = clk / 512; in sh_mmcif_clk_setup()
1029 host->mmc->f_max, host->mmc->f_min); in sh_mmcif_clk_setup()
1034 struct sh_mmcif_host *host = mmc_priv(mmc); in sh_mmcif_set_ios() local
1035 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_set_ios()
1038 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_set_ios()
1039 if (host->state != STATE_IDLE) { in sh_mmcif_set_ios()
1041 __func__, host->state); in sh_mmcif_set_ios()
1042 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1046 host->state = STATE_IOS; in sh_mmcif_set_ios()
1047 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1053 if (!host->power) { in sh_mmcif_set_ios()
1054 clk_prepare_enable(host->clk); in sh_mmcif_set_ios()
1056 sh_mmcif_sync_reset(host); in sh_mmcif_set_ios()
1057 sh_mmcif_request_dma(host); in sh_mmcif_set_ios()
1058 host->power = true; in sh_mmcif_set_ios()
1064 if (host->power) { in sh_mmcif_set_ios()
1065 sh_mmcif_clock_control(host, 0); in sh_mmcif_set_ios()
1066 sh_mmcif_release_dma(host); in sh_mmcif_set_ios()
1068 clk_disable_unprepare(host->clk); in sh_mmcif_set_ios()
1069 host->power = false; in sh_mmcif_set_ios()
1073 sh_mmcif_clock_control(host, ios->clock); in sh_mmcif_set_ios()
1077 host->timing = ios->timing; in sh_mmcif_set_ios()
1078 host->bus_width = ios->bus_width; in sh_mmcif_set_ios()
1079 host->state = STATE_IDLE; in sh_mmcif_set_ios()
1084 struct sh_mmcif_host *host = mmc_priv(mmc); in sh_mmcif_get_cd() local
1085 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_get_cd()
1095 return p->get_cd(host->pd); in sh_mmcif_get_cd()
1104 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) in sh_mmcif_end_cmd() argument
1106 struct mmc_command *cmd = host->mrq->cmd; in sh_mmcif_end_cmd()
1107 struct mmc_data *data = host->mrq->data; in sh_mmcif_end_cmd()
1108 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_end_cmd()
1111 if (host->sd_error) { in sh_mmcif_end_cmd()
1119 cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1124 host->sd_error = false; in sh_mmcif_end_cmd()
1132 sh_mmcif_get_response(host, cmd); in sh_mmcif_end_cmd()
1141 init_completion(&host->dma_complete); in sh_mmcif_end_cmd()
1144 if (host->chan_rx) in sh_mmcif_end_cmd()
1145 sh_mmcif_start_dma_rx(host); in sh_mmcif_end_cmd()
1147 if (host->chan_tx) in sh_mmcif_end_cmd()
1148 sh_mmcif_start_dma_tx(host); in sh_mmcif_end_cmd()
1151 if (!host->dma_active) { in sh_mmcif_end_cmd()
1152 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); in sh_mmcif_end_cmd()
1157 time = wait_for_completion_interruptible_timeout(&host->dma_complete, in sh_mmcif_end_cmd()
1158 host->timeout); in sh_mmcif_end_cmd()
1161 dma_unmap_sg(host->chan_rx->device->dev, in sh_mmcif_end_cmd()
1165 dma_unmap_sg(host->chan_tx->device->dev, in sh_mmcif_end_cmd()
1169 if (host->sd_error) { in sh_mmcif_end_cmd()
1170 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1173 data->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1175 dev_err(host->mmc->parent, "DMA timeout!\n"); in sh_mmcif_end_cmd()
1178 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1182 sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, in sh_mmcif_end_cmd()
1184 host->dma_active = false; in sh_mmcif_end_cmd()
1190 dmaengine_terminate_all(host->chan_rx); in sh_mmcif_end_cmd()
1192 dmaengine_terminate_all(host->chan_tx); in sh_mmcif_end_cmd()
1200 struct sh_mmcif_host *host = dev_id; in sh_mmcif_irqt() local
1202 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_irqt()
1207 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_irqt()
1208 wait_work = host->wait_for; in sh_mmcif_irqt()
1209 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_irqt()
1211 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_irqt()
1213 mutex_lock(&host->thread_lock); in sh_mmcif_irqt()
1215 mrq = host->mrq; in sh_mmcif_irqt()
1218 host->state, host->wait_for); in sh_mmcif_irqt()
1219 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1230 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1234 wait = sh_mmcif_end_cmd(host); in sh_mmcif_irqt()
1238 wait = sh_mmcif_mread_block(host); in sh_mmcif_irqt()
1242 wait = sh_mmcif_read_block(host); in sh_mmcif_irqt()
1246 wait = sh_mmcif_mwrite_block(host); in sh_mmcif_irqt()
1250 wait = sh_mmcif_write_block(host); in sh_mmcif_irqt()
1253 if (host->sd_error) { in sh_mmcif_irqt()
1254 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1258 sh_mmcif_get_cmd12response(host, mrq->stop); in sh_mmcif_irqt()
1263 if (host->sd_error) { in sh_mmcif_irqt()
1264 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1273 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1275 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1279 if (host->wait_for != MMCIF_WAIT_FOR_STOP) { in sh_mmcif_irqt()
1286 sh_mmcif_stop_cmd(host, mrq); in sh_mmcif_irqt()
1288 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1289 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1295 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_irqt()
1296 host->state = STATE_IDLE; in sh_mmcif_irqt()
1297 host->mrq = NULL; in sh_mmcif_irqt()
1298 mmc_request_done(host->mmc, mrq); in sh_mmcif_irqt()
1300 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1307 struct sh_mmcif_host *host = dev_id; in sh_mmcif_intr() local
1308 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_intr()
1311 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); in sh_mmcif_intr()
1312 mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); in sh_mmcif_intr()
1313 if (host->ccs_enable) in sh_mmcif_intr()
1314 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); in sh_mmcif_intr()
1316 sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); in sh_mmcif_intr()
1317 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); in sh_mmcif_intr()
1324 host->sd_error = true; in sh_mmcif_intr()
1328 if (!host->mrq) in sh_mmcif_intr()
1330 if (!host->dma_active) in sh_mmcif_intr()
1332 else if (host->sd_error) in sh_mmcif_intr()
1333 sh_mmcif_dma_complete(host); in sh_mmcif_intr()
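sh_mmcif_intr() is the hard interrupt handler: it reads and acknowledges MMCIF_CE_INT, masks the sources that fired, flags host->sd_error on error bits, and then hands off to the threaded handler sh_mmcif_irqt() (lines 1200 onward), typically by returning IRQ_WAKE_THREAD, so the state machine can be finished in process context and mmc_request_done() called. A minimal sketch of how such a hard/threaded pair is registered in probe(); only the argument tails are visible in the listing (lines 1497 and 1505), so the irq variable, flags, and name here are assumptions:

#include <linux/interrupt.h>

	int ret;

	/* Hard handler acknowledges/classifies; threaded handler does the work. */
	ret = devm_request_threaded_irq(dev, irq, sh_mmcif_intr, sh_mmcif_irqt,
					0, dev_name(dev), host);
	if (ret)
		return ret;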
1344 struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); in sh_mmcif_timeout_work() local
1345 struct mmc_request *mrq = host->mrq; in sh_mmcif_timeout_work()
1346 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_timeout_work()
1349 if (host->dying) in sh_mmcif_timeout_work()
1353 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_timeout_work()
1354 if (host->state == STATE_IDLE) { in sh_mmcif_timeout_work()
1355 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1360 host->wait_for, mrq->cmd->opcode); in sh_mmcif_timeout_work()
1362 host->state = STATE_TIMEOUT; in sh_mmcif_timeout_work()
1363 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1369 switch (host->wait_for) { in sh_mmcif_timeout_work()
1371 mrq->cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1374 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1382 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1388 host->state = STATE_IDLE; in sh_mmcif_timeout_work()
1389 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_timeout_work()
1390 host->mrq = NULL; in sh_mmcif_timeout_work()
1391 mmc_request_done(host->mmc, mrq); in sh_mmcif_timeout_work()
1394 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) in sh_mmcif_init_ocr() argument
1396 struct device *dev = sh_mmcif_host_to_dev(host); in sh_mmcif_init_ocr()
1398 struct mmc_host *mmc = host->mmc; in sh_mmcif_init_ocr()
1415 struct sh_mmcif_host *host; in sh_mmcif_probe() local
1442 host = mmc_priv(mmc); in sh_mmcif_probe()
1443 host->mmc = mmc; in sh_mmcif_probe()
1444 host->addr = reg; in sh_mmcif_probe()
1445 host->timeout = msecs_to_jiffies(10000); in sh_mmcif_probe()
1446 host->ccs_enable = !pd || !pd->ccs_unsupported; in sh_mmcif_probe()
1447 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; in sh_mmcif_probe()
1449 host->pd = pdev; in sh_mmcif_probe()
1451 spin_lock_init(&host->lock); in sh_mmcif_probe()
1454 sh_mmcif_init_ocr(host); in sh_mmcif_probe()
1468 platform_set_drvdata(pdev, host); in sh_mmcif_probe()
1470 host->clk = devm_clk_get(dev, NULL); in sh_mmcif_probe()
1471 if (IS_ERR(host->clk)) { in sh_mmcif_probe()
1472 ret = PTR_ERR(host->clk); in sh_mmcif_probe()
1477 ret = clk_prepare_enable(host->clk); in sh_mmcif_probe()
1481 sh_mmcif_clk_setup(host); in sh_mmcif_probe()
1484 host->power = false; in sh_mmcif_probe()
1490 INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work); in sh_mmcif_probe()
1492 sh_mmcif_sync_reset(host); in sh_mmcif_probe()
1493 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_probe()
1497 sh_mmcif_irqt, 0, name, host); in sh_mmcif_probe()
1505 0, "sh_mmc:int", host); in sh_mmcif_probe()
1518 mutex_init(&host->thread_lock); in sh_mmcif_probe()
1527 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, in sh_mmcif_probe()
1528 clk_get_rate(host->clk) / 1000000UL); in sh_mmcif_probe()
1531 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
1535 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
1545 struct sh_mmcif_host *host = platform_get_drvdata(pdev); in sh_mmcif_remove() local
1547 host->dying = true; in sh_mmcif_remove()
1548 clk_prepare_enable(host->clk); in sh_mmcif_remove()
1553 mmc_remove_host(host->mmc); in sh_mmcif_remove()
1554 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_remove()
1561 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_remove()
1563 clk_disable_unprepare(host->clk); in sh_mmcif_remove()
1564 mmc_free_host(host->mmc); in sh_mmcif_remove()
1574 struct sh_mmcif_host *host = dev_get_drvdata(dev); in sh_mmcif_suspend() local
1577 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_suspend()