/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

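/*
 * Allocate an FMR-backed MR large enough to cover @npages pages.
 * A recycled MR from the matching per-size pool is preferred; only when
 * none is available is a new ibmr allocated (on the device's NUMA node)
 * and a fresh ib_fmr created against the device's PD.
 */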
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

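	/* Create the HCA fast memory region itself, with full local and
	 * remote RDMA access, sized and limited by the pool's fmr_attr.
	 */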
	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
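	/* Drop the pool item_count reservation taken in
	 * rds_ib_try_reuse_ibmr() for the MR we failed to set up.
	 */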
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

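/*
 * DMA-map @sg and program @ibmr's FMR with the resulting page list.
 * Every entry in the scatterlist must be page aligned, except that the
 * first entry may start and the last entry may end in the middle of a
 * page.  On success the previous mapping, if any, is torn down and the
 * FMR covers the new pages.
 */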
int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

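	/* Count the pages the mapping will need, rejecting any layout the
	 * FMR can't express: only the first entry may start and only the
	 * last entry may end at a non-page-aligned address.
	 */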
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

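	/* Walk the mapped scatterlist again and record the page-aligned
	 * DMA address of every page it covers.
	 */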
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

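/*
 * Allocate an FMR-backed MR for @rds_ibdev and map @sg through it.  On
 * success *key is set to the rkey the remote side should use for RDMA.
 */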
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key = fmr->fmr->rkey;
	else
		rds_ib_free_mr(ibmr, 0);

	return ibmr;
}

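/*
 * Invalidate the MRs on @list: hand every FMR to ib_unmap_fmr() in one
 * call, unpin their pages, and destroy entries until @goal MRs have been
 * freed (MRs that have reached fmr_attr.max_maps are always destroyed).
 * @unpinned and @nfreed are updated for the caller's accounting.
 */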
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

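/*
 * Return an MR to its pool: MRs that have reached fmr_attr.max_maps go
 * on the drop list to be destroyed, the rest go on the free list for
 * reuse.
 */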
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}