// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

#define QAT_RFC_1950_HDR_SIZE 2
#define QAT_RFC_1950_FOOTER_SIZE 4
#define QAT_RFC_1950_CM_DEFLATE 8
#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
#define QAT_RFC_1950_CM_MASK 0x0f
#define QAT_RFC_1950_CM_OFFSET 4
#define QAT_RFC_1950_DICT_MASK 0x20
#define QAT_RFC_1950_COMP_HDR 0x785e

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;

struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];
	struct qat_compression_instance *inst;
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
struct qat_dst {
	bool is_null;
	bool resubmitted;
};

struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;
	enum direction dir;
	int actual_dlen;
	struct qat_alg_req alg_req;
	struct work_struct resubmit;
	struct qat_dst dst;
};

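/*
 * Send a compression/decompression firmware request on the instance's DC TX
 * ring. If the ring is full, the request may be placed on the instance
 * backlog or fail with -ENOSPC, which callers handle.
 */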
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

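/*
 * Work handler for requests submitted with a NULL destination. If the
 * firmware reported a destination overflow, reallocate the destination
 * buffer at CRYPTO_ACOMP_DST_MAX bytes, patch the firmware request to
 * point at it and resubmit the request once.
 */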
static void qat_comp_resubmit(struct work_struct *work)
{
	struct qat_compression_req *qat_req =
		container_of(work, struct qat_compression_req, resubmit);
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct qat_request_buffs *qat_bufs = &qat_req->buf;
	struct qat_compression_instance *inst = ctx->inst;
	struct acomp_req *areq = qat_req->acompress_req;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
	u8 *req = qat_req->req;
	dma_addr_t dfbuf;
	int ret;

	areq->dlen = dlen;

	dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);

	ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
					 qat_algs_alloc_flags(&areq->base));
	if (ret)
		goto err;

	qat_req->dst.resubmitted = true;

	dfbuf = qat_req->buf.bloutp;
	qat_comp_override_dst(req, dfbuf, dlen);

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret != -ENOSPC)
		return;

err:
	qat_bl_free_bufl(accel_dev, qat_bufs);
	acomp_request_complete(areq, ret);
}

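/*
 * Validate an RFC 1950 (zlib) stream header: the compression method must be
 * deflate with a window size of at most 32K and no preset dictionary.
 */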
static int parse_zlib_header(u16 zlib_h)
{
	int ret = -EINVAL;
	__be16 header;
	u8 *header_p;
	u8 cmf, flg;

	header = cpu_to_be16(zlib_h);
	header_p = (u8 *)&header;

	flg = header_p[0];
	cmf = header_p[1];

	if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
		return ret;

	if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
		return ret;

	if (flg & QAT_RFC_1950_DICT_MASK)
		return ret;

	return 0;
}

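/*
 * Post-process a zlib-deflate request: on compression, prepend the fixed
 * RFC 1950 header and append the adler32 checksum produced by the device;
 * on decompression, compare the device checksum against the stream footer.
 */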
static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
				     void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	enum direction dir = qat_req->dir;
	__be32 qat_produced_adler;

	qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));

	if (dir == COMPRESSION) {
		__be16 zlib_header;

		zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
		scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
		areq->dlen += QAT_RFC_1950_HDR_SIZE;

		scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
					 QAT_RFC_1950_FOOTER_SIZE, 1);
		areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
	} else {
		__be32 decomp_adler;
		int footer_offset;
		int consumed;

		consumed = qat_comp_get_consumed_ctr(resp);
		footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
		if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
			return -EBADMSG;

		scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
					 QAT_RFC_1950_FOOTER_SIZE, 0);

		if (qat_produced_adler != decomp_adler)
			return -EBADMSG;
	}
	return 0;
}

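/*
 * Common completion handler: decode the firmware response, handle the
 * overflow/resubmit case for driver-allocated destination buffers, run the
 * optional algorithm-specific callback, then unmap the buffer lists and
 * complete the acomp request.
 */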
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
		if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
			if (qat_req->dst.resubmitted) {
				dev_dbg(&GET_DEV(accel_dev),
					"Output does not fit destination buffer\n");
				res = -EOVERFLOW;
				goto end;
			}

			INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
			adf_misc_wq_queue_work(&qat_req->resubmit);
			return;
		}
	}

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}

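/*
 * Ring callback: recover the originating request from the opaque field of
 * the firmware response, complete it and kick any backlogged requests.
 */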
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
		(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}

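/*
 * Bind the transform to a compression instance, preferring the tfm's NUMA
 * node, and build the deflate context template used for firmware requests.
 */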
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	ctx->inst->build_deflate_ctx(ctx->comp_ctx);

	return 0;
}

static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}

static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = qat_comp_alg_init_tfm(acomp_tfm);
	ctx->qat_comp_callback = &qat_comp_rfc1950_callback;

	return ret;
}

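/*
 * Map the source and destination scatterlists, build the firmware request
 * and send it to the device. shdr/sftr and dhdr/dftr are the number of
 * header/footer bytes to skip in the source and to reserve in the
 * destination, used by the zlib-deflate wrappers.
 */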
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (areq->dst && !dlen)
		return -EINVAL;

	qat_req->dst.is_null = false;

	/* Handle acomp requests that require the allocation of a destination
	 * buffer. The size of the destination buffer is double the source
	 * buffer (rounded up to the size of a page) to fit the decompressed
	 * output or an expansion on the data for compression.
	 */
	if (!areq->dst) {
		qat_req->dst.is_null = true;

		dlen = round_up(2 * slen, PAGE_SIZE);
		areq->dst = sgl_alloc(dlen, f, NULL);
		if (!areq->dst)
			return -ENOMEM;

		dlen -= dhdr + dftr;
		areq->dlen = dlen;
		qat_req->dst.resubmitted = false;
	}

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
{
	if (!req->dst && req->dlen != 0)
		return -EINVAL;

	if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
		return -EINVAL;

	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
						QAT_RFC_1950_HDR_SIZE,
						QAT_RFC_1950_FOOTER_SIZE);
}

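/*
 * Validate the zlib header before offloading the raw deflate payload; the
 * header is skipped on submission and the adler32 footer is checked in the
 * completion callback.
 */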
static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
{
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	u16 zlib_header;
	int ret;

	if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
		return -EBADMSG;

	scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);

	ret = parse_zlib_header(zlib_header);
	if (ret) {
		dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
		return ret;
	}

	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
						QAT_RFC_1950_FOOTER_SIZE, 0, 0);
}

static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
	.dst_free = sgl_free,
	.reqsize = sizeof(struct qat_compression_req),
}, {
	.base = {
		.cra_name = "zlib-deflate",
		.cra_driver_name = "qat_zlib_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_rfc1950_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_rfc1950_compress,
	.decompress = qat_comp_alg_rfc1950_decompress,
	.dst_free = sgl_free,
	.reqsize = sizeof(struct qat_compression_req),
} };

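/*
 * Register the acomp algorithms when the first device comes up and
 * unregister them when the last one goes away; active_devs is protected
 * by algs_lock.
 */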
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}