/*
 * libcxgb_ppm.c: Chelsio common library for T3/T4/T5 iSCSI PagePod Manager
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#define DRV_NAME "libcxgb"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

#include "libcxgb_ppm.h"

/* Direct Data Placement -
 * Directly place the payload of iSCSI Data-In or Data-Out PDUs into
 * pre-posted final-destination host-memory buffers, based on the
 * Initiator Task Tag (ITT) in Data-In PDUs or the Target Task Tag
 * (TTT) in Data-Out PDUs. The host memory address is programmed into
 * the hardware in the form of pagepod entries. The location of the
 * pagepod entry is encoded into the ddp tag, which is used as the
 * base for the ITT/TTT.
 */
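
/* A sketch of the ddp tag layout as implied by the code below and by
 * cxgbi_ppm_make_ddp_tag(). The exact field widths come from the
 * cxgbi_tag_format programmed by the low-level driver; the diagram
 * assumes PPOD_IDX_SHIFT == 6 color bits, so treat it as illustration
 * only:
 *
 *	 31 30 29 ........................ 6 5 .............. 0
 *	+-----+-----------------------------+------------------+
 *	| pgsz|    pagepod index (hwidx)    |      color       |
 *	+-----+-----------------------------+------------------+
 *
 * Bits 31:30 optionally carry a per-tag page-size index, the low bits
 * carry a generation "color" that catches reuse of a stale tag, and
 * the middle bits locate the pagepod entries in adapter memory.
 */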

/* Direct-Data Placement page size adjustment
 */
int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
{
	struct cxgbi_tag_format *tformat = &ppm->tformat;
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
					 tformat->pgsz_order[i])) {
			pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
				 __func__, ppm->ndev->name, pgsz, i);
			return i;
		}
	}
	pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

/* DDP setup & teardown
 */
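
/* Search @bmap for a run of @nr free entries, first from @start to the
 * end of the map and then, failing that, wrapping around to search from
 * the beginning. On success the entries are marked busy and the start
 * index is returned; -ENOSPC means no suitable run was found.
 */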
static int ppm_find_unused_entries(unsigned long *bmap,
				   unsigned int max_ppods,
				   unsigned int start,
				   unsigned int nr,
				   unsigned int align_mask)
{
	unsigned long i;

	i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);

	if (unlikely(i >= max_ppods) && (start > nr))
		i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
					       align_mask);
	if (unlikely(i >= max_ppods))
		return -ENOSPC;

	bitmap_set(bmap, i, nr);
	return (int)i;
}

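/* Record the caller's cookie and pod count for a newly reserved range,
 * and advance the entry's "color" so a tag built from this reservation
 * can be distinguished from tags handed out for earlier uses of the
 * same pods.
 */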
static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
			     unsigned long caller_data)
{
	struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;

	pdata->caller_data = caller_data;
	pdata->npods = count;

	if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
		pdata->color = 0;
	else
		pdata->color++;
}

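/* Reserve @count pods from the current cpu's reserved pool. Returns the
 * index into the overall pod array (cpu offset plus pool index), or
 * -ENOSPC if this cpu's pool cannot satisfy the request.
 */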
static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
			       unsigned long caller_data)
{
	struct cxgbi_ppm_pool *pool;
	unsigned int cpu;
	int i;

	/* the per-cpu pool is optional (reserve_factor may be zero) */
	if (!ppm->pool)
		return -EINVAL;

	cpu = get_cpu();
	pool = per_cpu_ptr(ppm->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
				    pool->next, count, 0);
	if (i < 0) {
		pool->next = 0;
		spin_unlock_bh(&pool->lock);
		return -ENOSPC;
	}

	pool->next = i + count;
	if (pool->next >= ppm->pool_index_max)
		pool->next = 0;

	spin_unlock_bh(&pool->lock);

	pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
		 __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
		 pool->next);

	i += cpu * ppm->pool_index_max;
	ppm_mark_entries(ppm, i, count, caller_data);

	return i;
}

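/* Reserve @count pods from the shared (non per-cpu) region. Indices
 * returned here are offset by pool_rsvd so they land above the per-cpu
 * pools in the overall pod array.
 */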
static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
			   unsigned long caller_data)
{
	int i;

	spin_lock_bh(&ppm->map_lock);
	i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
				    ppm->next, count, 0);
	if (i < 0) {
		ppm->next = 0;
		spin_unlock_bh(&ppm->map_lock);
		pr_debug("ippm: NO suitable entries %u available.\n",
			 count);
		return -ENOSPC;
	}

	ppm->next = i + count;
	if (ppm->next >= ppm->bmap_index_max)
		ppm->next = 0;

	spin_unlock_bh(&ppm->map_lock);

	pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
		 __func__, i, count, i + ppm->pool_rsvd, ppm->next,
		 caller_data);

	i += ppm->pool_rsvd;
	ppm_mark_entries(ppm, i, count, caller_data);

	return i;
}

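/* Return a range of pods to whichever allocator it came from: indices
 * below pool_rsvd belong to a per-cpu pool, everything else to the
 * shared bitmap.
 */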
static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
{
	pr_debug("%s: idx %d + %d.\n", __func__, i, count);

	if (i < ppm->pool_rsvd) {
		unsigned int cpu;
		struct cxgbi_ppm_pool *pool;

		cpu = i / ppm->pool_index_max;
		i %= ppm->pool_index_max;

		pool = per_cpu_ptr(ppm->pool, cpu);
		spin_lock_bh(&pool->lock);
		bitmap_clear(pool->bmap, i, count);

		if (i < pool->next)
			pool->next = i;
		spin_unlock_bh(&pool->lock);

		pr_debug("%s: cpu %u, idx %d, next %u.\n",
			 __func__, cpu, i, pool->next);
	} else {
		spin_lock_bh(&ppm->map_lock);

		i -= ppm->pool_rsvd;
		bitmap_clear(ppm->ppod_bmap, i, count);

		if (i < ppm->next)
			ppm->next = i;
		spin_unlock_bh(&ppm->map_lock);

		pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
	}
}

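/* cxgbi_ppm_ppod_release - free the pagepod range starting at @idx that
 * was handed out by cxgbi_ppm_ppods_reserve(); the npods count recorded
 * at reservation time tells us how many entries to release.
 */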
void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
{
	struct cxgbi_ppod_data *pdata;

	if (idx >= ppm->ppmax) {
		pr_warn("ippm: idx too big %u >= %u.\n", idx, ppm->ppmax);
		return;
	}

	pdata = ppm->ppod_data + idx;
	if (!pdata->npods) {
		pr_warn("ippm: idx %u, npods 0.\n", idx);
		return;
	}

	pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
	ppm_unmark_entries(ppm, idx, pdata->npods);
}
EXPORT_SYMBOL(cxgbi_ppm_ppod_release);

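/* cxgbi_ppm_ppods_reserve - reserve pagepods for @nr_pages of payload
 * and construct the matching ddp tag. Each pagepod covers PPOD_PAGES_MAX
 * pages, so the pod count is nr_pages rounded up to the next pod
 * boundary. The per-cpu pool is tried first, then the shared region.
 * Returns the number of pods reserved, or a negative errno.
 */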
int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
			    u32 per_tag_pg_idx, u32 *ppod_idx,
			    u32 *ddp_tag, unsigned long caller_data)
{
	struct cxgbi_ppod_data *pdata;
	unsigned int npods;
	int idx = -1;
	unsigned int hwidx;
	u32 tag;

	npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (!npods) {
		pr_warn("%s: pages %u -> npods %u, full.\n",
			__func__, nr_pages, npods);
		return -EINVAL;
	}

	/* grab from cpu pool first */
	idx = ppm_get_cpu_entries(ppm, npods, caller_data);
	/* try the general pool */
	if (idx < 0)
		idx = ppm_get_entries(ppm, npods, caller_data);
	if (idx < 0) {
		pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
			 nr_pages, npods, ppm->next, caller_data);
		return idx;
	}

	pdata = ppm->ppod_data + idx;
	hwidx = ppm->base_idx + idx;

	tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);

	if (per_tag_pg_idx)
		tag |= (per_tag_pg_idx << 30) & 0xC0000000;

	*ppod_idx = idx;
	*ddp_tag = tag;

	pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
		 nr_pages, tag, idx, npods, caller_data);

	return npods;
}
EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);

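/* cxgbi_ppm_make_ppod_hdr - fill in the header written at the front of
 * each pagepod set. A typical caller flow, sketched for illustration
 * only (task, tid, offset and xferlen come from the surrounding driver
 * and are hypothetical here):
 *
 *	npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, &idx, &tag,
 *					(unsigned long)task);
 *	if (npods < 0)
 *		... fall back to non-ddp receive ...
 *	cxgbi_ppm_make_ppod_hdr(ppm, tag, tid, offset, xferlen, &hdr);
 *	... write npods pagepods to adapter memory starting at idx,
 *	    then use tag as the base for the ITT/TTT ...
 *	cxgbi_ppm_ppod_release(ppm, idx);
 */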
void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
			     unsigned int tid, unsigned int offset,
			     unsigned int length,
			     struct cxgbi_pagepod_hdr *hdr)
{
	/* The ddp tag stored in the pagepod must have bits 31:30 cleared;
	 * only the ddp tag sent to the peer on the wire carries a non-zero
	 * value in bits 31:30.
	 */
	tag &= 0x3FFFFFFF;

	hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));

	hdr->rsvd = 0;
	hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
	hdr->max_offset = htonl(length);
	hdr->page_offset = htonl(offset);

	pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
		 tag, tid, length, offset);
}
EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);

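/* ppm teardown: the ppm is reference counted. Every successful
 * cxgbi_ppm_init() against an existing ppm takes a reference, and the
 * backing memory is freed only when the last cxgbi_ppm_release() drops
 * the count to zero.
 */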
static void ppm_free(struct cxgbi_ppm *ppm)
{
	vfree(ppm);
}

static void ppm_destroy(struct kref *kref)
{
	struct cxgbi_ppm *ppm = container_of(kref,
					     struct cxgbi_ppm,
					     refcnt);
	pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
		ppm->ndev->name, ppm);

	*ppm->ppm_pp = NULL;

	free_percpu(ppm->pool);
	ppm_free(ppm);
}

int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
{
	if (ppm) {
		int rv;

		rv = kref_put(&ppm->refcnt, ppm_destroy);
		return rv;
	}
	return 1;
}
EXPORT_SYMBOL(cxgbi_ppm_release);

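/* Carve per-cpu reserved pools out of the requested pod count. On return
 * *total is the number of pods actually placed in per-cpu pools and
 * *pcpu_ppmax is the per-cpu pool size; both may be rounded down so that
 * each pool's bitmap is a whole number of unsigned longs and the pool
 * fits within PCPU_MIN_UNIT_SIZE.
 */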
static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
						 unsigned int *pcpu_ppmax)
{
	struct cxgbi_ppm_pool *pools;
	unsigned int ppmax = (*total) / num_possible_cpus();
	unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
	unsigned int bmap;
	unsigned int alloc_sz;
	unsigned int count = 0;
	unsigned int cpu;

	/* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
	if (ppmax > max)
		ppmax = max;

	/* pool size must be a multiple of unsigned long; round down, not
	 * up, so the recomputed ppmax never exceeds the bitmap actually
	 * allocated below.
	 */
	bmap = ppmax / BITS_PER_LONG;
	if (!bmap)
		return NULL;

	ppmax = (bmap * sizeof(unsigned long)) << 3;

	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
	pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));

	if (!pools)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);

		memset(ppool, 0, alloc_sz);
		spin_lock_init(&ppool->lock);
		count += ppmax;
	}

	*total = count;
	*pcpu_ppmax = ppmax;

	return pools;
}

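/* cxgbi_ppm_init - set up (or take a reference on) the pagepod manager
 * shared by the iSCSI drivers of an adapter. A reserve_factor of N moves
 * roughly 1/N of the pods into per-cpu pools to reduce contention on
 * map_lock; the rest are managed in a single shared bitmap. Returns 0 on
 * fresh initialization, 1 if an existing ppm was reused, or a negative
 * errno.
 */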
int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
		   struct pci_dev *pdev, void *lldev,
		   struct cxgbi_tag_format *tformat,
		   unsigned int ppmax,
		   unsigned int llimit,
		   unsigned int start,
		   unsigned int reserve_factor)
{
	struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
	struct cxgbi_ppm_pool *pool = NULL;
	unsigned int ppmax_pool = 0;
	unsigned int pool_index_max = 0;
	unsigned int alloc_sz;
	unsigned int ppod_bmap_size;

	if (ppm) {
		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
			ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
		kref_get(&ppm->refcnt);
		return 1;
	}

	if (reserve_factor) {
		ppmax_pool = ppmax / reserve_factor;
		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
		/* if the per-cpu pools cannot be allocated, serve
		 * everything from the shared bitmap instead
		 */
		if (!pool) {
			ppmax_pool = 0;
			reserve_factor = 0;
		}

		pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
			 ndev->name, ppmax, ppmax_pool, pool_index_max);
	}

	ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
	alloc_sz = sizeof(struct cxgbi_ppm) +
			ppmax * (sizeof(struct cxgbi_ppod_data)) +
			ppod_bmap_size * sizeof(unsigned long);

	ppm = vmalloc(alloc_sz);
	if (!ppm)
		goto release_ppm_pool;

	memset(ppm, 0, alloc_sz);

	ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);

	if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
		unsigned int start = ppmax - ppmax_pool;
		unsigned int end = ppod_bmap_size >> 3;

		bitmap_set(ppm->ppod_bmap, ppmax, end - start);
		pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
			__func__, ppmax, ppmax_pool, ppod_bmap_size, start,
			end);
	}

	spin_lock_init(&ppm->map_lock);
	kref_init(&ppm->refcnt);

	memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));

	ppm->ppm_pp = ppm_pp;
	ppm->ndev = ndev;
	ppm->pdev = pdev;
	ppm->lldev = lldev;
	ppm->ppmax = ppmax;
	ppm->next = 0;
	ppm->llimit = llimit;
	ppm->base_idx = start > llimit ?
			(start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
	ppm->bmap_index_max = ppmax - ppmax_pool;

	ppm->pool = pool;
	ppm->pool_rsvd = ppmax_pool;
	ppm->pool_index_max = pool_index_max;

	/* check one more time */
	if (*ppm_pp) {
		ppm_free(ppm);
		ppm = (struct cxgbi_ppm *)(*ppm_pp);

		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
			ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);

		kref_get(&ppm->refcnt);
		return 1;
	}
	*ppm_pp = ppm;

	ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);

	pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
		ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
		ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
		ppm->pool_index_max);

	return 0;

release_ppm_pool:
	free_percpu(pool);
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgbi_ppm_init);

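/* Compute the ddp tag mask value for @ppmax pods: fls(ppmax) index bits,
 * capped at PPOD_IDX_MAX_SIZE, shifted up past the PPOD_IDX_SHIFT color
 * bits.
 */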
unsigned int cxgbi_tagmask_set(unsigned int ppmax)
{
	unsigned int bits = fls(ppmax);

	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;

	pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
		ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));

	return 1 << (bits + PPOD_IDX_SHIFT);
}
EXPORT_SYMBOL(cxgbi_tagmask_set);

MODULE_AUTHOR("Chelsio Communications");
MODULE_DESCRIPTION("Chelsio common library");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");