drivers/mmc/host/cavium.c: shared part of the Cavium OCTEON/ThunderX MMC host driver (excerpt; elided code is marked /* ... */).
/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
32 "MMC Buffer",
33 "MMC Command",
34 "MMC DMA",
35 "MMC Command Error",
36 "MMC DMA Error",
37 "MMC Switch",
38 "MMC Switch Error",
39 "MMC DMA int Fifo",
40 "MMC DMA int",
/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types. These are correct if MMC devices are
 * being used. However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type. We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
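
/*
 * Sketch of the table this comment refers to (not shown in this
 * excerpt): one {ctype, rtype} pair per opcode. The entry values
 * below are illustrative only, not the complete table.
 */
struct cvm_mmc_cr_type {
        u8 ctype;
        u8 rtype;
};

static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
        {0xff, 0},      /* CMD0: no data transfer, no response */
        {0, 3},         /* CMD1: no data transfer, R3 response */
        /* ... one entry per opcode, indexed by cmd->opcode & 0x3f ... */
};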

/* cvm_mmc_get_cr_mods(): map the MMC core's command onto hardware types */
        cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
        hardware_ctype = cr->ctype;
        hardware_rtype = cr->rtype;
        if (cmd->opcode == MMC_GEN_CMD)
                hardware_ctype = (cmd->arg & 1) ? 1 : 2;
        /* ... */
        desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
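
        /*
         * Hedged sketch of how the lookup result is applied, assuming
         * the override is built by XOR-ing the desired type against the
         * hardware type (a zero XOR leaves the hardware default alone):
         *
         *      r.ctype_xor = desired_ctype ^ hardware_ctype;
         *      r.rtype_xor = desired_rtype ^ hardware_rtype;
         */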

/* check_switch_errors() */
        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        if (emm_switch & MIO_EMM_SWITCH_ERR0)
                dev_err(host->dev, "Switch power class error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR1)
                dev_err(host->dev, "Switch hs timing error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR2)
                dev_err(host->dev, "Switch bus width error\n");

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
/* do_switch(): write the switch value, then poll until it takes effect */
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
        /* ... */
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        /* wait for the switch to finish */
        do {
                rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                        break;
                udelay(10);
        } while (--retries);

/* switch_val_changed() */
        return (slot->cached_switch & match) != (new_val & match);

/* set_wdog(): program the data-timeout watchdog in bus-clock cycles */
        if (!slot->clock)
                return;
        if (ns)
                timeout = (slot->clock * ns) / NSEC_PER_SEC;
        else
                timeout = (slot->clock * 850ull) / 1000ull;
        writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
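
        /*
         * Worked example for the calculation above: at a 52 MHz bus
         * clock, a 100 ms (10^8 ns) timeout programs
         * (52000000 * 10^8) / 10^9 = 5200000 cycles; with ns == 0 the
         * watchdog defaults to 850/1000 of a second worth of cycles.
         */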

/* cvm_mmc_reset_bus() */
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch, wdog;

        emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
        emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
                        MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
        set_bus_id(&emm_switch, slot->bus_id);

        wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
        do_switch(slot->host, emm_switch);
        slot->cached_switch = emm_switch;
        /* ... */
        writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));

/* cvm_mmc_switch_to(): make this slot's cached state current in the hardware */
        struct cvm_mmc_host *host = slot->host;
        struct cvm_mmc_slot *old_slot;
        u64 emm_sample, emm_switch;

        if (slot->bus_id == host->last_slot)
                return;

        if (host->last_slot >= 0 && host->slot[host->last_slot]) {
                old_slot = host->slot[host->last_slot];
                old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
                old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
        }

        writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
        emm_switch = slot->cached_switch;
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
                     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
        writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

        host->last_slot = slot->bus_id;

/* do_read(): PIO read of the hardware data buffer into the sg list */
        struct sg_mapping_iter *smi = &host->smi;
        int data_len = req->data->blocks * req->data->blksz;
        int bytes_xfered, shift = -1;
        u64 dat;

        /* auto-increment from offset zero within the selected dbuf */
        writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }
                if (shift < 0) {
                        dat = readq(host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                }
                while (smi->consumed < smi->length && shift >= 0) {
                        ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
        }

        sg_miter_stop(smi);
        req->data->bytes_xfered = bytes_xfered;
        req->data->error = 0;
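
        /*
         * Example of the unpacking above: each word from MIO_EMM_BUF_DAT
         * is consumed most-significant byte first (shift runs 56, 48,
         * ..., 0), so dat == 0x0011223344556677 lands in the buffer as
         * the byte sequence 00 11 22 33 44 55 66 77.
         */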

/* do_write(): the data was already staged into the transmit buffer, so
 * only the bookkeeping remains */
        req->data->bytes_xfered = req->data->blocks * req->data->blksz;
        req->data->error = 0;

/* set_cmd_response() */
        rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

        switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
        case 1:
        case 3:
                req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
                req->cmd->resp[1] = 0;
                req->cmd->resp[2] = 0;
                req->cmd->resp[3] = 0;
                break;
        case 2:
                req->cmd->resp[3] = rsp_lo & 0xffffffff;
                req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
                rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
                req->cmd->resp[1] = rsp_hi & 0xffffffff;
                req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
                break;
        }
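
        /*
         * Layout note for the cases above: for short (48-bit) responses
         * the 32-bit payload sits in bits [39:8] of MIO_EMM_RSP_LO,
         * hence the ">> 8"; long (136-bit) R2 responses span RSP_LO and
         * RSP_HI and unpack into resp[0..3] most-significant word first.
         */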

/* get_dma_dir() */
        return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

/* finish_dma_single() */
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));

/* finish_dma_sg() */
        /* Check if there are any pending requests left */
        fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
        if (count)
                dev_err(host->dev, "%u requests still pending\n", count);

        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;

        /* Clear and disable the FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));

/* finish_dma(): dispatch to the scatter-gather or single-buffer variant */
        if (host->use_sg && data->sg_len > 1)
                return finish_dma_sg(host, data);
        return finish_dma_single(host, data);

/* check_status(): error returns; the conditions are elided in this excerpt */
                return -EILSEQ;
        /* ... */
                return -ETIMEDOUT;
        /* ... */
                return -EIO;
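
        /*
         * Hedged sketch of the full mapping, assuming the status-bit
         * names used elsewhere in this driver: CRC or bad-status errors
         * become -EILSEQ, response/block timeouts -ETIMEDOUT, and a
         * data-buffer error -EIO:
         *
         *      if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
         *          rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
         *          rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
         *              return -EILSEQ;
         *      if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
         *          rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
         *              return -ETIMEDOUT;
         *      if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
         *              return -EIO;
         *      return 0;
         */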

/* cleanup_dma(): abort a DMA operation after an error */
        emm_dma = readq(host->base + MIO_EMM_DMA(host));
        /* ... */
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));

/* cvm_mmc_interrupt(): IRQ handler shared by all slots */
        if (host->need_irq_handler_lock)
                spin_lock_irqsave(&host->irq_handler_lock, flags);
        else
                __acquire(&host->irq_handler_lock);

        /* clear interrupt bits (write 1 clears) */
        emm_int = readq(host->base + MIO_EMM_INT(host));
        writeq(emm_int, host->base + MIO_EMM_INT(host));
        /* ... */
        req = host->current_req;
        /* ... */
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /*
         * DMA_VAL set means DMA is still in progress; leave the request
         * alone and wait for the completion interrupt.
         */
        if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
                goto out;

        if (!host->dma_active && req->data &&
            (emm_int & MIO_EMM_INT_BUF_DONE)) {
                /* ... PIO completion: do_read()/do_write() ... */
        }

        /* ... host_done = any of the CMD/DMA done or error bits ... */
        if (!(host_done && req->done))
                goto no_req_done;

        req->cmd->error = check_status(rsp_sts);

        if (host->dma_active && req->data)
                if (!finish_dma(host, req->data))
                        goto no_req_done;
        /* ... */
        host->current_req = NULL;
        req->done(req);

no_req_done:
        if (host->dmar_fixup_done)
                host->dmar_fixup_done(host);
        if (host_done)
                host->release_bus(host);
out:
        if (host->need_irq_handler_lock)
                spin_unlock_irqrestore(&host->irq_handler_lock, flags);
        else
                __release(&host->irq_handler_lock);

/* prepare_dma_single() */
        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;

        rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        /* ... build dma_cfg (enable, direction, endianness) ... */
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
                              (sg_dma_len(&data->sg[0]) / 8) - 1);

        addr = sg_dma_address(&data->sg[0]);
        if (!host->big_dma_addr)
                dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
                 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

        if (host->big_dma_addr)
                writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
        return addr;
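
        /*
         * Worked example for the SIZE field above: it takes the transfer
         * length in 8-byte words minus one, so a single 512-byte block
         * programs 512 / 8 - 1 = 63.
         */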

/* prepare_dma_sg() */
        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;
        /* ... */
        /* Enable the FIFO by clearing the CLR bit */
        writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

        for_each_sg(data->sg, sg, count, i) {
                addr = sg_dma_address(sg);
                writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

                /*
                 * If we have scatter-gather support we also have an extra
                 * register for the DMA addr, so no need to check
                 * host->big_dma_addr here.
                 */
                rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
                /* ... build fifo_cmd; the size field is in 8-byte words: */
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
                                       sg_dma_len(sg) / 8 - 1);
                /* this write pushes address and command into the FIFO */
                writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
        }

        /*
         * In contrast to prepare_dma_single we don't return the DMA
         * address here, as it would not make sense for scatter-gather.
         * The DMA fixup is only required on models that don't support
         * scatter-gather, so that is not a problem.
         */
        return 1;

error:
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        /* Disable the FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        return 0;

/* prepare_dma(): dispatch to the scatter-gather or single-buffer variant */
        if (host->use_sg && data->sg_len > 1)
                return prepare_dma_sg(host, data);
        return prepare_dma_single(host, data);

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        u64 emm_dma;

        emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                  FIELD_PREP(MIO_EMM_DMA_SECTOR,
                             mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_RW,
                             (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
                  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
        set_bus_id(&emm_dma, slot->bus_id);

        if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
            (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
                emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

        pr_debug("[%s] blocks: %u  multi: %d\n",
                 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
                 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
        return emm_dma;
}
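
/*
 * Usage note: per the condition above, the MULTI (multi-block) bit is
 * only set for MMC cards and for SD cards that advertise CMD23 support
 * in their SCR; other cards fall back to single-block transfers.
 */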

static void cvm_mmc_dma_request(struct mmc_host *mmc,
                                struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_data *data;
        u64 emm_dma, addr;

        /* external DMA requests must carry data and a stop command */
        if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
            !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
                dev_err(&mmc->card->dev,
                        "Error: cvm_mmc_dma_request no data\n");
                goto error;
        }

        cvm_mmc_switch_to(slot);

        data = mrq->data;
        pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
                 data->blocks, data->blksz, data->blocks * data->blksz);
        if (data->timeout_ns)
                set_wdog(slot, data->timeout_ns);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        emm_dma = prepare_ext_dma(mmc, mrq);
        addr = prepare_dma(host, data);
        if (!addr) {
                dev_err(host->dev, "prepare_dma failed\n");
                goto error;
        }

        host->dma_active = true;
        host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
                         MIO_EMM_INT_DMA_ERR);

        if (host->dmar_fixup)
                host->dmar_fixup(host, mrq->cmd, data, addr);

        /*
         * For a valid SD card the status mask is narrowed to CRC and
         * timeout checks; otherwise the default reset value is used.
         */
        if (mmc_card_sd(mmc->card))
                writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
        else
                writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
        return;

error:
        mrq->cmd->error = -EINVAL;
        if (mrq->done)
                mrq->done(mrq);
        host->release_bus(host);
}

/* do_read_request() */
        sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);

/* do_write_request(): pack bytes MSB-first into 64-bit words and stage
 * them in the transmit buffer before the command is sent */
        unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
        struct sg_mapping_iter *smi = &host->smi;
        /* ... */
        sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

        /* auto-increment from offset zero */
        writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }
                while (smi->consumed < smi->length && shift >= 0) {
                        dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
                if (shift < 0) {
                        writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                        dat = 0;
                }
        }
        sg_miter_stop(smi);

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_command *cmd = mrq->cmd;
        struct cvm_mmc_cr_mods mods;
        u64 emm_cmd, rsp_sts;
        int retries = 100;

        /*
         * Note about locking:
         * All MMC devices share the same bus and controller. Allow only a
         * single user of the bootbus/MMC bus at a time. The lock is acquired
         * on all entry points from the MMC layer.
         */
        host->acquire_bus(host);

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                return cvm_mmc_dma_request(mmc, mrq);

        cvm_mmc_switch_to(slot);
        mods = cvm_mmc_get_cr_mods(cmd);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        if (cmd->data) {
                if (cmd->data->flags & MMC_DATA_READ)
                        do_read_request(host, mrq);
                else
                        do_write_request(host, mrq);

                if (cmd->data->timeout_ns)
                        set_wdog(slot, cmd->data->timeout_ns);
        }

        host->dma_active = false;
        host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

        emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
                  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
                  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
        set_bus_id(&emm_cmd, slot->bus_id);
        if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                emm_cmd |= FIELD_PREP(MIO_EMM_CMD_BUF_IDX,
                                64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
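
        /*
         * Worked example for the buffer index above: the field counts
         * 8-byte words, so a single 512-byte block gives
         * 64 - (512 / 8) = 0 and the data occupies the whole 512-byte
         * buffer half starting at word 0.
         */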

        writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /* wait until no command, switch or DMA is pending before issuing */
        if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
                udelay(10);
                if (--retries)
                        goto retry;
        }
        if (!retries)
                dev_err(host->dev, "Bad status: %llx before command write\n",
                        rsp_sts);
        writeq(emm_cmd, host->base + MIO_EMM_CMD(host));

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        /* ... */
        host->acquire_bus(host);
        cvm_mmc_switch_to(slot);

        /* Set the power state */
        switch (ios->power_mode) {
        case MMC_POWER_ON:
                break;
        case MMC_POWER_OFF:
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;
        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }

        /* Convert bus width to the hardware encoding */
        switch (ios->bus_width) {
        /* ... */
        }

        /* DDR is available for 4/8 bit bus width */
        if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
                bus_width |= 4;

        /* Change the clock frequency (capped at 52 MHz) */
        clock = ios->clock;
        if (clock > 52000000)
                clock = 52000000;
        slot->clock = clock;
        if (clock)
                clk_period = (host->sys_freq + clock - 1) / (2 * clock);
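
        /*
         * Worked example, assuming an 800 MHz sys_freq and a 52 MHz
         * request: clk_period = (800000000 + 52000000 - 1) / (2 * 52000000)
         * = 8; rounding up means the resulting bus clock,
         * 800 MHz / (2 * 8) = 50 MHz, never exceeds the request.
         */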

        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
                                (ios->timing == MMC_TIMING_MMC_HS)) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
        set_bus_id(&emm_switch, slot->bus_id);

        if (!switch_val_changed(slot, emm_switch))
                goto out;

        set_wdog(slot, 0);
        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;
out:
        host->release_bus(host);

/* cvm_mmc_set_clock(): clamp the requested clock into the host's range */
        struct mmc_host *mmc = slot->mmc;

        clock = min(clock, mmc->f_max);
        clock = max(clock, mmc->f_min);
        slot->clock = clock;

/* cvm_mmc_init_lowlevel() */
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch;

        /* Enable this bus slot */
        host->emm_cfg |= (1ull << slot->bus_id);
        writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
        udelay(10);

        /* Program initial clock speed and power */
        cvm_mmc_set_clock(slot, slot->mmc->f_min);
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
                                 (host->sys_freq / slot->clock) / 2);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
                                 (host->sys_freq / slot->clock) / 2);
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;

        /* Set the watchdog, the default status mask and the relative
         * card address for CMD7 transactions */
        set_wdog(slot, 0);
        writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(1, host->base + MIO_EMM_RCA(host));

/* cvm_mmc_of_parse() */
        struct device_node *node = dev->of_node;
        struct mmc_host *mmc = slot->mmc;
        /* ... read the slot's "reg" property into id ... */
        if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
                dev_err(dev, "Invalid reg property on %pOF\n", node);
                return -EINVAL;
        }

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                return ret;
        /*
         * Legacy OCTEON firmware has no regulator entry; fall back to
         * a hard-coded voltage to get a sane OCR.
         */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;

        /* Set bus width */
        if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
                of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
                if (bus_width == 8)
                        mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
                else if (bus_width == 4)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        /* Set maximum and minimum frequency */
        if (!mmc->f_max)
                of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
        if (!mmc->f_max || mmc->f_max > 52000000)
                mmc->f_max = 52000000;
        mmc->f_min = 400000;

        /* Sampling register settings; clock_period is in picoseconds */
        clock_period = 1000000000000ull / slot->host->sys_freq;
        of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
        of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
        slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
        slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
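
        /*
         * Worked example, assuming an 800 MHz sys_freq: clock_period =
         * 10^12 / 800000000 = 1250 ps, so a "cavium,cmd-clk-skew" of
         * 2500 (picoseconds) rounds to (2500 + 625) / 1250 = 2 sample
         * delay taps in slot->cmd_cnt.
         */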

/* cvm_mmc_of_slot_probe() */
        struct mmc_host *mmc;
        int ret, id;

        mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->mmc = mmc;
        slot->host = host;
        /* ... parse the DT node; a negative return is an error ... */
        mmc->ops = &cvm_mmc_ops;

        /*
         * Only a 3.3V supply is available, so the UHS modes cannot be
         * supported; high-speed DDR modes up to 52 MHz do work.
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

        if (host->use_sg)
                mmc->max_segs = 16;
        else
                mmc->max_segs = 1;

        /* the DMA size field can address up to 8 MB */
        mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
                                  dma_get_max_seg_size(host->dev));
        mmc->max_req_size = mmc->max_seg_size;
        /* external DMA works in 512-byte blocks */
        mmc->max_blk_size = 512;
        mmc->max_blk_count = 32767;
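
        /*
         * Note: 32767 (2^15 - 1) matches the 15-bit width of the
         * hardware's DMA block-count field, so a maximal request still
         * fits in MIO_EMM_DMA_BLOCK_CNT.
         */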

        slot->clock = mmc->f_min;
        slot->bus_id = id;
        slot->cached_rca = 1;

        host->acquire_bus(host);
        host->slot[id] = slot;
        cvm_mmc_switch_to(slot);
        cvm_mmc_init_lowlevel(slot);
        host->release_bus(host);

        ret = mmc_add_host(mmc);
        if (ret) {
                dev_err(dev, "mmc_add_host() returned %d\n", ret);
                slot->host->slot[id] = NULL;
                goto error_free;
        }
        return 0;

error_free:
        mmc_free_host(slot->mmc);
        return ret;

/* cvm_mmc_of_slot_remove() */
        mmc_remove_host(slot->mmc);
        slot->host->slot[slot->bus_id] = NULL;
        mmc_free_host(slot->mmc);