// SPDX-License-Identifier: GPL-2.0
/*
 * bsg endpoint that supports UPIUs
 *
 * Copyright (C) 2018 Western Digital Corporation
 */

8 #include <linux/bsg-lib.h>
9 #include <linux/dma-mapping.h>
10 #include <scsi/scsi.h>
11 #include <scsi/scsi_host.h>
12 #include "ufs_bsg.h"
13 #include <ufs/ufshcd.h>
14 #include "ufshcd-priv.h"
15
ufs_bsg_get_query_desc_size(struct ufs_hba * hba,int * desc_len,struct utp_upiu_query * qr)16 static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
17 struct utp_upiu_query *qr)
18 {
19 int desc_size = be16_to_cpu(qr->length);
20
21 if (desc_size <= 0)
22 return -EINVAL;
23
24 *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
25
26 return 0;
27 }
28
/*
 * ufs_bsg_alloc_desc_buffer - set up a bounce buffer for descriptor queries
 * @hba: per adapter object
 * @job: bsg job carrying the request payload
 * @desc_buff: out parameter receiving the allocated buffer (untouched for
 *             non-descriptor opcodes)
 * @desc_len: out parameter receiving the validated descriptor length
 * @desc_op: query opcode deciding whether a buffer is needed at all
 *
 * For UPIU_QUERY_OPCODE_WRITE_DESC the buffer is pre-filled from the user
 * scatterlist. On success the caller owns *desc_buff and must kfree() it.
 *
 * Return: 0 on success (including the nothing-to-do case), -EINVAL on an
 * illegal descriptor size, -ENOMEM on allocation failure.
 */
static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
				     uint8_t **desc_buff, int *desc_len,
				     enum query_opcode desc_op)
{
	struct ufs_bsg_request *bsg_request = job->request;
	struct utp_upiu_query *qr;
	u8 *buf;

	/* Only descriptor reads and writes carry a descriptor payload. */
	if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
	    desc_op != UPIU_QUERY_OPCODE_READ_DESC)
		return 0;

	qr = &bsg_request->upiu_req.qr;
	if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
		dev_err(hba->dev, "Illegal desc size\n");
		return -EINVAL;
	}

	/* The user payload must be large enough to hold the descriptor. */
	if (*desc_len > job->request_payload.payload_len) {
		dev_err(hba->dev, "Illegal desc size\n");
		return -EINVAL;
	}

	buf = kzalloc(*desc_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* For a write, bring the descriptor contents in from the sglist. */
	if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
		sg_copy_to_buffer(job->request_payload.sg_list,
				  job->request_payload.sg_cnt, buf,
				  *desc_len);

	*desc_buff = buf;

	return 0;
}
66
/*
 * ufs_bsg_exec_advanced_rpmb_req - execute an advanced RPMB request
 * @hba: per adapter object
 * @job: bsg job holding the RPMB request/reply including EHS sections
 *
 * Validates that the host and device support advanced RPMB, derives the
 * DMA direction from the RPMB request type, maps the payload scatterlist
 * when a data phase is involved, and hands everything to
 * ufshcd_advanced_rpmb_req_handler(). On success with a data phase the
 * payload length is reported back in the bsg reply.
 *
 * Return: 0 on success, -EINVAL on unsupported configuration or request,
 * -ENOMEM if the scatterlist could not be DMA-mapped, or the handler's
 * error code.
 */
static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
{
	struct ufs_rpmb_request *rpmb_request = job->request;
	struct ufs_rpmb_reply *rpmb_reply = job->reply;
	struct bsg_buffer *payload = NULL;
	enum dma_data_direction dir;
	struct scatterlist *sg_list = NULL;
	int rpmb_req_type;
	int sg_cnt = 0;
	int ret;
	int data_len;	/* only set/used when a data phase exists (dir != DMA_NONE) */

	/*
	 * Advanced RPMB needs UFSHCI 4.0+, the device-side advanced-RPMB
	 * feature flag, and EHS-in-UTRD support from the controller.
	 */
	if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
	    !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
		return -EINVAL;

	/* Request EHS must have length 2 and type 1 (presumably per the UFS
	 * Advanced RPMB spec — confirm against JESD220). */
	if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
		return -EINVAL;

	rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);

	/* The RPMB request type dictates whether and which way data moves. */
	switch (rpmb_req_type) {
	case UFS_RPMB_WRITE_KEY:
	case UFS_RPMB_READ_CNT:
	case UFS_RPMB_PURGE_ENABLE:
		dir = DMA_NONE;
		break;
	case UFS_RPMB_WRITE:
	case UFS_RPMB_SEC_CONF_WRITE:
		dir = DMA_TO_DEVICE;
		break;
	case UFS_RPMB_READ:
	case UFS_RPMB_SEC_CONF_READ:
	case UFS_RPMB_PURGE_STATUS_READ:
		dir = DMA_FROM_DEVICE;
		break;
	default:
		return -EINVAL;
	}

	if (dir != DMA_NONE) {
		/* A data phase requires a non-empty user payload. */
		payload = &job->request_payload;
		if (!payload || !payload->payload_len || !payload->sg_cnt)
			return -EINVAL;

		sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
		if (unlikely(!sg_cnt))
			return -ENOMEM;
		sg_list = payload->sg_list;
		data_len = payload->payload_len;
	}

	ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
				   &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
				   &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);

	if (dir != DMA_NONE) {
		dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);

		/* Report how much payload data was transferred. */
		if (!ret)
			rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
	}

	return ret;
}
132
/*
 * ufs_bsg_request - dispatch a bsg job for the ufs-bsg node
 * @job: bsg job carrying the request and reply UPIUs
 *
 * Decodes the message code and routes the job to raw UPIU execution,
 * UIC command submission, or the advanced RPMB handler, keeping the
 * adapter runtime-resumed for the duration. The job is completed via
 * bsg_job_done() only on success; on failure the negative error code is
 * returned to bsg-lib.
 */
static int ufs_bsg_request(struct bsg_job *job)
{
	struct ufs_bsg_request *bsg_request = job->request;
	struct ufs_bsg_reply *bsg_reply = job->reply;
	struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
	struct uic_command uc = {};
	int msgcode;
	uint8_t *buff = NULL;
	int desc_len = 0;
	enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
	int ret;
	bool rpmb = false;

	bsg_reply->reply_payload_rcv_len = 0;

	/* Keep the device runtime-resumed while the command executes. */
	ufshcd_rpm_get_sync(hba);

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case UPIU_TRANSACTION_QUERY_REQ:
		/* Descriptor queries may need a kernel bounce buffer. */
		desc_op = bsg_request->upiu_req.qr.opcode;
		ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
		if (ret)
			goto out;
		fallthrough;
	case UPIU_TRANSACTION_NOP_OUT:
	case UPIU_TRANSACTION_TASK_REQ:
		ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
					       &bsg_reply->upiu_rsp, msgcode,
					       buff, &desc_len, desc_op);
		if (ret)
			dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
		else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
			/* Copy the descriptor just read back to the sglist. */
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->request_payload.sg_list,
						    job->request_payload.sg_cnt,
						    buff, desc_len);
		}
		break;
	case UPIU_TRANSACTION_UIC_CMD:
		/* Work on a local copy; the hardware fills in results. */
		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
		ret = ufshcd_send_uic_cmd(hba, &uc);
		if (ret)
			dev_err(hba->dev, "send uic cmd: error code %d\n", ret);

		/* Return the command (with any results) even on failure. */
		memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);

		break;
	case UPIU_TRANSACTION_ARPMB_CMD:
		rpmb = true;
		ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
		if (ret)
			dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);

		break;
	}

out:
	ufshcd_rpm_put_sync(hba);
	/* kfree(NULL) is a no-op, so this is safe for the non-query paths. */
	kfree(buff);
	bsg_reply->result = ret;
	/* RPMB replies carry extra EHS data, hence the larger reply size. */
	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
	/* complete the job here only if no error */
	if (ret == 0)
		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);

	return ret;
}
205
206 /**
207 * ufs_bsg_remove - detach and remove the added ufs-bsg node
208 * @hba: per adapter object
209 *
210 * Should be called when unloading the driver.
211 */
ufs_bsg_remove(struct ufs_hba * hba)212 void ufs_bsg_remove(struct ufs_hba *hba)
213 {
214 struct device *bsg_dev = &hba->bsg_dev;
215
216 if (!hba->bsg_queue)
217 return;
218
219 bsg_remove_queue(hba->bsg_queue);
220
221 device_del(bsg_dev);
222 put_device(bsg_dev);
223 }
224
ufs_bsg_node_release(struct device * dev)225 static inline void ufs_bsg_node_release(struct device *dev)
226 {
227 put_device(dev->parent);
228 }
229
230 /**
231 * ufs_bsg_probe - Add ufs bsg device node
232 * @hba: per adapter object
233 *
234 * Called during initial loading of the driver, and before scsi_scan_host.
235 */
ufs_bsg_probe(struct ufs_hba * hba)236 int ufs_bsg_probe(struct ufs_hba *hba)
237 {
238 struct device *bsg_dev = &hba->bsg_dev;
239 struct Scsi_Host *shost = hba->host;
240 struct device *parent = &shost->shost_gendev;
241 struct request_queue *q;
242 int ret;
243
244 device_initialize(bsg_dev);
245
246 bsg_dev->parent = get_device(parent);
247 bsg_dev->release = ufs_bsg_node_release;
248
249 dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
250
251 ret = device_add(bsg_dev);
252 if (ret)
253 goto out;
254
255 q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
256 if (IS_ERR(q)) {
257 ret = PTR_ERR(q);
258 goto out;
259 }
260
261 hba->bsg_queue = q;
262
263 return 0;
264
265 out:
266 dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
267 put_device(bsg_dev);
268 return ret;
269 }
270