Lines Matching refs:brq

1144 	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)  in mmc_blk_cmd_recovery()  argument
1178 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || in mmc_blk_cmd_recovery()
1179 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) in mmc_blk_cmd_recovery()
1185 (brq->stop.resp[0] & R1_ERROR)) { in mmc_blk_cmd_recovery()
1188 brq->stop.resp[0], status); in mmc_blk_cmd_recovery()
1199 DIV_ROUND_UP(brq->data.timeout_ns, 1000000), in mmc_blk_cmd_recovery()
1216 if (brq->sbc.error) in mmc_blk_cmd_recovery()
1217 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, in mmc_blk_cmd_recovery()
1221 if (brq->cmd.error) in mmc_blk_cmd_recovery()
1222 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, in mmc_blk_cmd_recovery()
1226 if (!brq->stop.error) in mmc_blk_cmd_recovery()
1231 req->rq_disk->disk_name, brq->stop.error, in mmc_blk_cmd_recovery()
1232 brq->cmd.resp[0], status); in mmc_blk_cmd_recovery()
1239 brq->stop.resp[0] = stop_status; in mmc_blk_cmd_recovery()
1240 brq->stop.error = 0; in mmc_blk_cmd_recovery()
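
Read in order, these matches outline how mmc_blk_cmd_recovery() classifies a failed brq: ECC failures are picked out of the cmd/stop responses (lines 1178-1179), a card-side R1_ERROR on the stop response is logged (lines 1185-1188), a data timeout is derived from brq->data.timeout_ns (line 1199), and the transport errors are then handled in sbc, cmd, stop order, with a recoverable stop phase patched in place. A minimal sketch of that tail of the function, reconstructed only from the fragments above; the elided mmc_blk_cmd_error() arguments, the re-read of the card status into stop_status, the ERR_CONTINUE return value, and the log message text are assumptions:

/* Sketch of lines 1216-1240: handle errors in sbc -> cmd -> stop order. */
if (brq->sbc.error)
	return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				 /* remaining arguments elided */);

if (brq->cmd.error)
	return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				 /* remaining arguments elided */);

/* If the stop command succeeded, only the data phase can be at fault. */
if (!brq->stop.error)
	return ERR_CONTINUE;

/* The stop command itself failed: log it, then adopt the freshly read
 * card status (stop_status) as the stop response and clear the error. */
pr_err("%s: stop command error %d, cmd response %#x, card status %#x\n",
       req->rq_disk->disk_name, brq->stop.error,
       brq->cmd.resp[0], status);
brq->stop.resp[0] = stop_status;
brq->stop.error = 0;
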
1439 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, in mmc_apply_rel_rw() argument
1445 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) in mmc_apply_rel_rw()
1446 brq->data.blocks = 1; in mmc_apply_rel_rw()
1448 if (brq->data.blocks > card->ext_csd.rel_sectors) in mmc_apply_rel_rw()
1449 brq->data.blocks = card->ext_csd.rel_sectors; in mmc_apply_rel_rw()
1450 else if (brq->data.blocks < card->ext_csd.rel_sectors) in mmc_apply_rel_rw()
1451 brq->data.blocks = 1; in mmc_apply_rel_rw()
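
The three conditionals at lines 1445-1451 are essentially the whole body of mmc_apply_rel_rw(): a reliable write is trimmed so it never crosses or exceeds the card's reliable-write unit. A sketch assembled from those fragments and the call site at line 1720; the enclosing check for enhanced reliable-write support (which does not reference brq and therefore does not appear above) is omitted:

static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	/* Transfers must start on a rel_sectors boundary and may be at
	 * most rel_sectors long; anything else falls back to one block. */
	if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
		brq->data.blocks = 1;

	if (brq->data.blocks > card->ext_csd.rel_sectors)
		brq->data.blocks = card->ext_csd.rel_sectors;
	else if (brq->data.blocks < card->ext_csd.rel_sectors)
		brq->data.blocks = 1;
}
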
1468 struct mmc_blk_request *brq = &mq_mrq->brq; in mmc_blk_err_check() local
1482 if (brq->sbc.error || brq->cmd.error || brq->stop.error || in mmc_blk_err_check()
1483 brq->data.error) { in mmc_blk_err_check()
1484 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { in mmc_blk_err_check()
1501 if (brq->cmd.resp[0] & CMD_ERRORS) { in mmc_blk_err_check()
1503 req->rq_disk->disk_name, brq->cmd.resp[0]); in mmc_blk_err_check()
1516 if (brq->stop.resp[0] & R1_ERROR) { in mmc_blk_err_check()
1519 brq->stop.resp[0]); in mmc_blk_err_check()
1536 if (brq->data.error) { in mmc_blk_err_check()
1538 req->rq_disk->disk_name, brq->data.error, in mmc_blk_err_check()
1541 brq->cmd.resp[0], brq->stop.resp[0]); in mmc_blk_err_check()
1552 if (!brq->data.bytes_xfered) in mmc_blk_err_check()
1556 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) in mmc_blk_err_check()
1562 if (blk_rq_bytes(req) != brq->data.bytes_xfered) in mmc_blk_err_check()
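
mmc_blk_err_check() takes the brq back out of the queued request (line 1468) and runs a fixed series of checks: transport errors on any phase go through mmc_blk_cmd_recovery(), the R1 responses and the data phase are inspected next, and the transferred byte counts finally decide between full and partial completion. A condensed sketch of that sequence; the MMC_BLK_* return codes, the handling of the recovery verdict, and the log messages are abbreviated or assumed rather than copied:

struct mmc_blk_request *brq = &mq_mrq->brq;
int ecc_err = 0, gen_err = 0;

/* Transport-level errors on any phase go through recovery first. */
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
    brq->data.error) {
	switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
	/* retry / abort / continue, depending on the recovery verdict */
	}
}

/* Card-reported errors in the R1 responses. */
if (brq->cmd.resp[0] & CMD_ERRORS)
	return MMC_BLK_ABORT;
if (brq->stop.resp[0] & R1_ERROR)
	gen_err = 1;		/* noted; rechecked after the write completes */

if (brq->data.error)
	return ecc_err ? MMC_BLK_ECC_ERR : MMC_BLK_DATA_ERR;

/* Decide success vs. partial from the bytes actually transferred. */
if (!brq->data.bytes_xfered)
	return MMC_BLK_RETRY;
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
	return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
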
1633 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_rw_rq_prep() local
1650 memset(brq, 0, sizeof(struct mmc_blk_request)); in mmc_blk_rw_rq_prep()
1651 brq->mrq.cmd = &brq->cmd; in mmc_blk_rw_rq_prep()
1652 brq->mrq.data = &brq->data; in mmc_blk_rw_rq_prep()
1654 brq->cmd.arg = blk_rq_pos(req); in mmc_blk_rw_rq_prep()
1656 brq->cmd.arg <<= 9; in mmc_blk_rw_rq_prep()
1657 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; in mmc_blk_rw_rq_prep()
1658 brq->data.blksz = 512; in mmc_blk_rw_rq_prep()
1659 brq->stop.opcode = MMC_STOP_TRANSMISSION; in mmc_blk_rw_rq_prep()
1660 brq->stop.arg = 0; in mmc_blk_rw_rq_prep()
1661 brq->data.blocks = blk_rq_sectors(req); in mmc_blk_rw_rq_prep()
1668 if (brq->data.blocks > card->host->max_blk_count) in mmc_blk_rw_rq_prep()
1669 brq->data.blocks = card->host->max_blk_count; in mmc_blk_rw_rq_prep()
1671 if (brq->data.blocks > 1) { in mmc_blk_rw_rq_prep()
1678 brq->data.blocks = 1; in mmc_blk_rw_rq_prep()
1685 brq->data.blocks = card->host->ops->multi_io_quirk(card, in mmc_blk_rw_rq_prep()
1688 brq->data.blocks); in mmc_blk_rw_rq_prep()
1691 if (brq->data.blocks > 1 || do_rel_wr) { in mmc_blk_rw_rq_prep()
1697 brq->mrq.stop = &brq->stop; in mmc_blk_rw_rq_prep()
1701 brq->mrq.stop = NULL; in mmc_blk_rw_rq_prep()
1706 brq->cmd.opcode = readcmd; in mmc_blk_rw_rq_prep()
1707 brq->data.flags |= MMC_DATA_READ; in mmc_blk_rw_rq_prep()
1708 if (brq->mrq.stop) in mmc_blk_rw_rq_prep()
1709 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | in mmc_blk_rw_rq_prep()
1712 brq->cmd.opcode = writecmd; in mmc_blk_rw_rq_prep()
1713 brq->data.flags |= MMC_DATA_WRITE; in mmc_blk_rw_rq_prep()
1714 if (brq->mrq.stop) in mmc_blk_rw_rq_prep()
1715 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | in mmc_blk_rw_rq_prep()
1720 mmc_apply_rel_rw(brq, card, req); in mmc_blk_rw_rq_prep()
1729 ((brq->data.blocks * brq->data.blksz) >= in mmc_blk_rw_rq_prep()
1750 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && in mmc_blk_rw_rq_prep()
1753 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; in mmc_blk_rw_rq_prep()
1754 brq->sbc.arg = brq->data.blocks | in mmc_blk_rw_rq_prep()
1757 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; in mmc_blk_rw_rq_prep()
1758 brq->mrq.sbc = &brq->sbc; in mmc_blk_rw_rq_prep()
1761 mmc_set_data_timeout(&brq->data, card); in mmc_blk_rw_rq_prep()
1763 brq->data.sg = mqrq->sg; in mmc_blk_rw_rq_prep()
1764 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); in mmc_blk_rw_rq_prep()
1770 if (brq->data.blocks != blk_rq_sectors(req)) { in mmc_blk_rw_rq_prep()
1771 int i, data_size = brq->data.blocks << 9; in mmc_blk_rw_rq_prep()
1774 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { in mmc_blk_rw_rq_prep()
1782 brq->data.sg_len = i; in mmc_blk_rw_rq_prep()
1785 mqrq->mmc_active.mrq = &brq->mrq; in mmc_blk_rw_rq_prep()
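
The bulk of the matches in mmc_blk_rw_rq_prep() are the construction of the brq itself: zero it, point the mmc_request at its command and data, derive the command argument from the block-layer sector (shifted to a byte address on non-block-addressed cards), size and clamp the transfer, attach a stop command for multi-block transfers, pre-declare the count with CMD23 where supported, and map the scatterlist. A compressed sketch of that construction; readcmd/writecmd, do_rel_wr and md are locals of the surrounding function, the mmc_card_blockaddr() guard and the MMC_CMD23_ARG_REL_WR flag are assumptions, and the multi_io_quirk clamp (lines 1685-1688) and the scatterlist trimming when data.blocks was reduced (lines 1770-1782) are left out:

memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.cmd = &brq->cmd;
brq->mrq.data = &brq->data;

brq->cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))		/* assumed guard for the shift */
	brq->cmd.arg <<= 9;
brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
brq->data.blksz = 512;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
brq->data.blocks = blk_rq_sectors(req);

/* Never ask the host controller for more blocks than it can take. */
if (brq->data.blocks > card->host->max_blk_count)
	brq->data.blocks = card->host->max_blk_count;

/* Multi-block (or reliable-write) transfers get an explicit stop command. */
if (brq->data.blocks > 1 || do_rel_wr)
	brq->mrq.stop = &brq->stop;
else
	brq->mrq.stop = NULL;

if (rq_data_dir(req) == READ) {
	brq->cmd.opcode = readcmd;
	brq->data.flags |= MMC_DATA_READ;
	if (brq->mrq.stop)
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
} else {
	brq->cmd.opcode = writecmd;
	brq->data.flags |= MMC_DATA_WRITE;
	if (brq->mrq.stop)
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
}

if (do_rel_wr)
	mmc_apply_rel_rw(brq, card, req);

/* With CMD23 the block count is declared up front instead of stopping. */
if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode)) {
	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = brq->data.blocks |
		       (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
	brq->mrq.sbc = &brq->sbc;
}

mmc_set_data_timeout(&brq->data, card);

brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

mqrq->mmc_active.mrq = &brq->mrq;
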
1919 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_packed_hdr_wrq_prep() local
1949 ((brq->data.blocks * brq->data.blksz) >= in mmc_blk_packed_hdr_wrq_prep()
1964 memset(brq, 0, sizeof(struct mmc_blk_request)); in mmc_blk_packed_hdr_wrq_prep()
1965 brq->mrq.cmd = &brq->cmd; in mmc_blk_packed_hdr_wrq_prep()
1966 brq->mrq.data = &brq->data; in mmc_blk_packed_hdr_wrq_prep()
1967 brq->mrq.sbc = &brq->sbc; in mmc_blk_packed_hdr_wrq_prep()
1968 brq->mrq.stop = &brq->stop; in mmc_blk_packed_hdr_wrq_prep()
1970 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; in mmc_blk_packed_hdr_wrq_prep()
1971 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); in mmc_blk_packed_hdr_wrq_prep()
1972 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; in mmc_blk_packed_hdr_wrq_prep()
1974 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; in mmc_blk_packed_hdr_wrq_prep()
1975 brq->cmd.arg = blk_rq_pos(req); in mmc_blk_packed_hdr_wrq_prep()
1977 brq->cmd.arg <<= 9; in mmc_blk_packed_hdr_wrq_prep()
1978 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; in mmc_blk_packed_hdr_wrq_prep()
1980 brq->data.blksz = 512; in mmc_blk_packed_hdr_wrq_prep()
1981 brq->data.blocks = packed->blocks + hdr_blocks; in mmc_blk_packed_hdr_wrq_prep()
1982 brq->data.flags |= MMC_DATA_WRITE; in mmc_blk_packed_hdr_wrq_prep()
1984 brq->stop.opcode = MMC_STOP_TRANSMISSION; in mmc_blk_packed_hdr_wrq_prep()
1985 brq->stop.arg = 0; in mmc_blk_packed_hdr_wrq_prep()
1986 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; in mmc_blk_packed_hdr_wrq_prep()
1988 mmc_set_data_timeout(&brq->data, card); in mmc_blk_packed_hdr_wrq_prep()
1990 brq->data.sg = mqrq->sg; in mmc_blk_packed_hdr_wrq_prep()
1991 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); in mmc_blk_packed_hdr_wrq_prep()
1993 mqrq->mmc_active.mrq = &brq->mrq; in mmc_blk_packed_hdr_wrq_prep()
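
The packed-write header request is built in the same shape, but with everything fixed up front: CMD23 carries MMC_CMD23_ARG_PACKED plus the total block count (payload plus header blocks), the data phase is always a multi-block write, and a stop command is always attached. A sketch pieced together from lines 1964-1993; only the mmc_card_blockaddr() guard around the shift is assumed:

memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.cmd = &brq->cmd;
brq->mrq.data = &brq->data;
brq->mrq.sbc = &brq->sbc;
brq->mrq.stop = &brq->stop;

brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
brq->cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))		/* assumed guard, as above */
	brq->cmd.arg <<= 9;
brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

brq->data.blksz = 512;
brq->data.blocks = packed->blocks + hdr_blocks;
brq->data.flags |= MMC_DATA_WRITE;

brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

mmc_set_data_timeout(&brq->data, card);

brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

mqrq->mmc_active.mrq = &brq->mrq;
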
2000 struct mmc_blk_request *brq, struct request *req, in mmc_blk_cmd_err() argument
2004 mq_rq = container_of(brq, struct mmc_queue_req, brq); in mmc_blk_cmd_err()
2023 ret = blk_end_request(req, 0, brq->data.bytes_xfered); in mmc_blk_cmd_err()
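
mmc_blk_cmd_err() needs the brq only to locate its owning queue entry and to know how much data actually reached the card; whatever was transferred is completed back to the block layer. The two brq references above amount to roughly:

/* Recover the queue entry that embeds this brq. */
mq_rq = container_of(brq, struct mmc_queue_req, brq);

/* ... (paths that query the card directly are omitted) ... */

/* Complete the portion of the request that did get transferred. */
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
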
2104 struct mmc_blk_request *brq = &mq->mqrq_cur->brq; in mmc_blk_issue_rw_rq() local
2128 if ((brq->data.blocks & 0x07) && in mmc_blk_issue_rw_rq()
2152 brq = &mq_rq->brq; in mmc_blk_issue_rw_rq()
2172 brq->data.bytes_xfered); in mmc_blk_issue_rw_rq()
2183 brq->data.bytes_xfered); in mmc_blk_issue_rw_rq()
2189 ret = mmc_blk_cmd_err(md, card, brq, req, ret); in mmc_blk_issue_rw_rq()
2215 if (brq->data.blocks > 1) { in mmc_blk_issue_rw_rq()
2228 brq->data.blksz); in mmc_blk_issue_rw_rq()
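
Finally, mmc_blk_issue_rw_rq() works on the brq of the current queue slot (line 2104), rejects transfers whose brq->data.blocks is not a multiple of 8 in one early check (line 2128, apparently for cards with a larger native sector), completes successfully transferred bytes with blk_end_request(), and routes command errors through mmc_blk_cmd_err(). The last two matches are the data-error fallback: a multi-block brq is retried one sector at a time before a lone sector is failed with -EIO. A sketch of that fallback, with the surrounding case labels and the single-block retry mechanics assumed rather than copied:

/* Data/ECC error handling (sketch): */
if (brq->data.blocks > 1) {
	/* Retry the transfer one sector at a time. */
	disable_multi = 1;	/* assumed name of the retry flag */
	break;
}
/* Already down to one sector: fail just that block and move on. */
ret = blk_end_request(req, -EIO, brq->data.blksz);
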