/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "rds.h"

static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
};

void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	atomic_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->data.op_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		rds_mr_put(rm->rdma.op_rdma_mr);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		rds_mr_put(rm->atomic.op_rdma_mr);
}

void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
	if (atomic_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);
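
/*
 * Lifecycle sketch (illustrative only, not lifted from a real caller):
 * rds_message_alloc() returns with one reference held; each additional
 * user takes its own reference and drops it when done, and the final
 * rds_message_put() purges and frees the message.
 *
 *	struct rds_message *rm = rds_message_alloc(0, GFP_KERNEL);
 *
 *	if (rm) {
 *		rds_message_addref(rm);	(say, while queued somewhere)
 *		...
 *		rds_message_put(rm);	(the queue's reference)
 *		rds_message_put(rm);	(the allocation's; frees rm)
 *	}
 */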

void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
			      const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);
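
/*
 * Example, as a sketch rather than a real call site: advertise a
 * protocol version in an outgoing header.  RDS_PROTOCOL_3_1 is just an
 * illustrative value here.  Only one extension fits per header, so a
 * second call on the same header returns 0, as does a type/length
 * mismatch or lack of space.
 *
 *	struct rds_ext_header_version ext;
 *
 *	ext.h_version = cpu_to_be32(RDS_PROTOCOL_3_1);
 *	if (!rds_message_add_extension(hdr, RDS_EXTHDR_VERSION,
 *				       &ext, sizeof(ext)))
 *		(no room, or an extension was already added)
 */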

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
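
/*
 * Receive-side counterpart, as a sketch: the rkey and offset added
 * above can be pulled back out with rds_message_next_extension().
 *
 *	struct rds_ext_header_rdma_dest ext;
 *	unsigned int pos = 0, len = sizeof(ext);
 *
 *	if (rds_message_next_extension(hdr, &pos, &ext, &len) ==
 *	    RDS_EXTHDR_RDMA_DEST && len == sizeof(ext))
 *		use be32_to_cpu(ext.h_rdma_rkey) and
 *		    be32_to_cpu(ext.h_rdma_offset)
 */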

/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
 * can grab SGs when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
		return NULL;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	atomic_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
	WARN_ON(!nents);

	if (rm->m_used_sgs + nents > rm->m_total_sgs)
		return NULL;

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}
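
/*
 * Intended pairing, as a sketch: callers size the message for the SG
 * entries their ops will need, then carve them out of the pool that
 * rds_message_alloc() reserved past the end of the struct.
 *
 *	num_sgs = ceil(total_len, PAGE_SIZE);
 *	rm = rds_message_alloc(num_sgs * sizeof(struct scatterlist), gfp);
 *	if (rm)
 *		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
 *
 * rds_message_map_pages() below does exactly this.
 */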

struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = ceil(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg) {
		rds_message_put(rm);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
				virt_to_page(page_addrs[i]),
				PAGE_SIZE, 0);
	}

	return rm;
}

int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
			       size_t total_len)
{
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long sg_off;
	struct iovec *iov;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;
	iov = first_iov;
	iov_off = 0;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	while (total_len) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, total_len,
						       GFP_HIGHUSER);
			if (ret)
				goto out;
			rm->data.op_nents++;
			sg_off = 0;
		}

		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
		to_copy = min_t(size_t, to_copy, total_len);

		rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 (void *)sg_page(sg), sg->offset, sg->length, sg_off);

		ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
					      iov->iov_base + iov_off,
					      to_copy);
		if (ret)
			goto out;

		iov_off += to_copy;
		total_len -= to_copy;
		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

out:
	return ret;
}

int rds_message_inc_copy_to_user(struct rds_incoming *inc,
				 struct iovec *first_iov, size_t size)
{
	struct rds_message *rm;
	struct iovec *iov;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	iov = first_iov;
	iov_off = 0;
	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (copied < size && copied < len) {
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(sg), sg->offset, sg->length, vec_off);

		ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
				 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
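
/*
 * Transport-side sketch (illustrative, under the assumption that the
 * transport maps the message for DMA): set RDS_MSG_MAPPED before
 * posting and call rds_message_unmapped() once the hardware is done,
 * which releases any rds_message_wait() sleeper.
 *
 *	set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 *	... post work requests, wait for send completion ...
 *	rds_message_unmapped(rm);
 */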