/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"

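/* MANAGE_PAGES op_mod values: give pages to the device, take them
 * back, or report that no pages can be supplied.
 */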
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

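/* A firmware page request, queued from the PAGE_REQUEST event
 * handler and serviced in process context by pages_work_handler().
 */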
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

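/* One host page given to firmware, tracked per function in an
 * rb-tree keyed by DMA address.  The page is carved into
 * MLX5_NUM_4K_IN_PAGE chunks of MLX5_ADAPTER_PAGE_SIZE; set bits in
 * 'bitmask' mark chunks that are still free and 'free_count' counts
 * them.
 */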
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

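/* Pack (func_id, ec_function) into the u32 key used for
 * dev->priv.page_root_xa: the function id in the low 16 bits and the
 * embedded-CPU flag in bit 16.
 */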
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

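/* Look up the page rb-tree for this function, allocating an empty
 * one and inserting it into the xarray on first use.
 */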
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

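/* Start tracking a freshly mapped host page: insert it into the
 * per-function tree with all of its 4K chunks marked free and put it
 * on the free list.
 */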
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

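/* Find the fw_page tracking 'addr' in this function's tree, or NULL. */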
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

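/* Query how many pages firmware wants (boot or init stage) and for
 * which function id.
 */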
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

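/* Hand out one free 4K chunk from a host page already owned by this
 * function; -ENOMEM tells the caller to allocate a new system page.
 */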
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

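/* Stop tracking a host page: unlink it from the tree (and, if
 * needed, the free list), unmap it and give it back to the kernel.
 */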
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

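/* Return one 4K chunk.  The host page joins the free list when its
 * first chunk comes back and is released once every chunk is free.
 */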
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

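/* Allocate and DMA-map one host page for firmware.  A mapping that
 * lands at bus address 0 is kept aside and the page is remapped,
 * because firmware cannot use a page with physical address 0.
 */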
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

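/* Notify firmware that its page request cannot be satisfied. */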
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

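/* Allocate npages 4K chunks and post them to firmware in a
 * MANAGE_PAGES(GIVE) command; on failure the chunks are rolled back
 * and, if requested, firmware is told no pages are coming.
 */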
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

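/* Free every page tracked for this function without issuing reclaim
 * commands, when firmware asks for all pages back at once.
 */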
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

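/* Write up to 'npages' of fwp's in-use 4K chunk addresses into the
 * reclaim output, starting at 'index'; returns the number written.
 */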
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

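/* Execute the reclaim command, or, when the command interface is
 * down, answer it from the driver's own bookkeeping so pages can
 * still be freed during teardown.
 */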
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

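/* Ask firmware to return up to 'npages' pages for 'func_id' and free
 * every 4K chunk reported back in the command output.
 */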
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

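/* Service one queued page request: release all pages, reclaim, or
 * give, depending on what firmware asked for.
 */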
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

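/* Flag bits carried in the ec_function field of a page-request EQE. */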
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

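/* PAGE_REQUEST event handler: decode the EQE and queue the request
 * for processing in process context.
 */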
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

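/* Give firmware the boot- or init-stage pages it requests during
 * device bring-up.
 */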
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

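/* How many page entries fit in the output of one reclaim command
 * built from MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */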
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

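/* Drain one function's tree, restarting the timeout whenever
 * firmware actually returns pages.
 */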
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

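/* Reclaim the pages of every function and check that all page
 * counters drop back to zero.
 */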
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.peer_pf_pages,
	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.peer_pf_pages);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

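/* Wait for a page counter to drain to zero, extending the timeout as
 * long as pages keep coming back.
 */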
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}