• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2014 - 2022 Intel Corporation */
3 #include <linux/device.h>
4 #include <linux/dma-mapping.h>
5 #include <linux/pci.h>
6 #include <linux/scatterlist.h>
7 #include <linux/slab.h>
8 #include <linux/types.h>
9 #include "adf_accel_devices.h"
10 #include "qat_bl.h"
11 #include "qat_crypto.h"
12 
qat_bl_free_bufl(struct adf_accel_dev * accel_dev,struct qat_request_buffs * buf)13 void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
14 		      struct qat_request_buffs *buf)
15 {
16 	struct device *dev = &GET_DEV(accel_dev);
17 	struct qat_alg_buf_list *bl = buf->bl;
18 	struct qat_alg_buf_list *blout = buf->blout;
19 	dma_addr_t blp = buf->blp;
20 	dma_addr_t blpout = buf->bloutp;
21 	size_t sz = buf->sz;
22 	size_t sz_out = buf->sz_out;
23 	int bl_dma_dir;
24 	int i;
25 
26 	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
27 
28 	for (i = 0; i < bl->num_bufs; i++)
29 		dma_unmap_single(dev, bl->bufers[i].addr,
30 				 bl->bufers[i].len, bl_dma_dir);
31 
32 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
33 
34 	if (!buf->sgl_src_valid)
35 		kfree(bl);
36 
37 	if (blp != blpout) {
38 		for (i = 0; i < blout->num_mapped_bufs; i++) {
39 			dma_unmap_single(dev, blout->bufers[i].addr,
40 					 blout->bufers[i].len,
41 					 DMA_FROM_DEVICE);
42 		}
43 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
44 
45 		if (!buf->sgl_dst_valid)
46 			kfree(blout);
47 	}
48 }
49 
/*
 * __qat_bl_sgl_to_bufl() - convert scatterlists into the flat buffer-list
 * format consumed by the QAT firmware and DMA-map everything.
 *
 * Builds a qat_alg_buf_list for @sgl (and, for out-of-place operations where
 * @sglout differs from @sgl, a second list for @sglout), maps each non-empty
 * scatterlist entry with dma_map_single(), then maps the lists themselves.
 * Small lists (<= QAT_MAX_BUFF_DESC entries) reuse the preallocated storage
 * embedded in @buf instead of allocating; this is tracked via
 * buf->sgl_src_valid / buf->sgl_dst_valid so the free path knows what to
 * kfree. An optional @extra_dst_buff (already DMA-mapped by the caller) is
 * appended to the destination list but excluded from num_mapped_bufs so it
 * is never unmapped here.
 *
 * Return: 0 on success, -EINVAL for an empty source sgl, -ENOMEM on
 * allocation or mapping failure (everything mapped so far is unwound).
 */
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	/* Use the preallocated list in @buf when it is big enough. */
	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = &buf->sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	/* In-place (src == dst) buffers are both read and written by HW. */
	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	/*
	 * Pre-poison every address so the error path can tell mapped
	 * entries from never-touched ones.
	 */
	for (i = 0; i < n; i++)
		bufl->bufers[i].addr = DMA_MAPPING_ERROR;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		/* Zero-length entries are skipped, so y can lag behind i. */
		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      bufl_dma_dir);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	/* Map the list structure itself for the firmware to read. */
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, bufers, n);
		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = &buf->sgl_dst.sgl_hdr;
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		bufers = buflout->bufers;
		for (i = 0; i < n; i++)
			bufers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		/*
		 * The extra buffer was mapped by the caller; include it in
		 * num_bufs (visible to HW) but not num_mapped_bufs, so the
		 * free path never unmaps it.
		 */
		if (extra_buff) {
			bufers[sg_nctr].addr = extra_dst_buff;
			bufers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

err_out:
	/* Unwind destination side first, then fall through to the source. */
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		/* Stop at the caller-owned extra buffer: not ours to unmap. */
		if (buflout->bufers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
205 
/*
 * qat_bl_sgl_to_bufl() - public entry point for building firmware buffer
 * lists from scatterlists.
 *
 * Thin wrapper around __qat_bl_sgl_to_bufl() that unpacks the optional
 * @params (extra destination buffer and its size); a NULL @params means no
 * extra destination buffer.
 */
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = params ? params->extra_dst_buff : 0;
	size_t sz_extra_dst_buff = params ? params->sz_extra_dst_buff : 0;

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    flags);
}
225