/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"

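/*
 * Firmware page management.
 *
 * The device firmware borrows host memory in MLX5_ADAPTER_PAGE_SIZE (4K)
 * chunks, requested through MLX5_CMD_OP_MANAGE_PAGES commands and
 * PAGE_REQUEST events. Each host page handed to firmware is tracked by a
 * struct fw_page in a per-function red-black tree keyed by DMA address;
 * the trees themselves live in dev->priv.page_root_xa, indexed by the
 * (function id, embedded CPU function) pair.
 */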
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

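/* Deferred work item for a single firmware page request event. */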
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

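/*
 * One host page given to firmware, split into MLX5_NUM_4K_IN_PAGE device
 * pages. @bitmask has a bit set for each 4K chunk that is still free;
 * @free_count caches the number of set bits. Pages with at least one free
 * chunk are linked on dev->priv.free_list.
 */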
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS		= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

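/* Pack the function id and the EC-function flag into a single xarray key. */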
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

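/*
 * Return the page rb tree for @function, allocating it and publishing it
 * in the xarray on first use.
 */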
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

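/*
 * Track a freshly DMA-mapped host page in the per-function tree, keyed by
 * its bus address, with all 4K chunks marked free.
 */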
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

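/* Find the fw_page tracking the host page that starts at @addr. */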
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

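/*
 * Query how many pages firmware needs for the @boot or init stage, and
 * for which function id.
 */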
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

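/*
 * Carve one free 4K chunk for @function out of a page on the free list and
 * return its DMA address. -ENOMEM tells the caller to allocate and map a
 * new host page first.
 */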
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
			       fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

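/*
 * Untrack and release a host page: drop it from the rb tree (and the free
 * list when it is linked there), DMA-unmap it and free it.
 */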
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

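/*
 * Return one 4K chunk to its fw_page. The last chunk back frees the whole
 * host page; the first chunk back puts the page on the free list.
 */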
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

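/*
 * Allocate a host page near the device's NUMA node, DMA-map it and start
 * tracking it. Firmware cannot use bus address 0, so a mapping at 0 is
 * kept aside and a second mapping is taken before the first is released.
 */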
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

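/* Tell firmware we could not satisfy its page request. */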
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

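/*
 * Hand @npages 4K pages to firmware on behalf of @func_id: allocate the
 * chunks (mapping new host pages as needed), post a MANAGE_PAGES(GIVE)
 * command with their addresses, and update the page accounting. On failure
 * everything is rolled back and, if requested, firmware is notified.
 */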
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

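/*
 * Drop every page tracked for the function without issuing a firmware
 * command, e.g. when firmware signals a bulk release, and fix up the
 * accounting counters.
 */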
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	dev->priv.fw_pages -= npages;
	if (func_id)
		dev->priv.vfs_pages -= npages;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

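/*
 * Report up to @npages of @fwp's in-use (bit clear) 4K chunks in the
 * reclaim output, starting at @index; returns how many were written.
 */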
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

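/*
 * Execute a MANAGE_PAGES(TAKE) command, or, when the command interface is
 * down, synthesize its output from the driver's own tracking so the pages
 * can still be freed.
 */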
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

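/*
 * Take back up to @npages pages from firmware for @func_id and free the
 * returned 4K chunks. The number actually returned by firmware is stored
 * in @nclaimed when the caller asks for it.
 */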
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;
	else if (mlx5_core_is_ecpf(dev) && !ec_function)
		dev->priv.peer_pf_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

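/* Process one queued page request: release all, reclaim, or give pages. */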
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

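/*
 * PAGE_REQUEST event handler. Decode the EQE (the ec_function word also
 * carries the EC-function and release-all flag bits) and queue the actual
 * work; the handler runs in atomic context, hence GFP_ATOMIC and no
 * command execution here.
 */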
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	eqe = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

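/* Give firmware the pages it asks for at boot or init time. */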
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

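/*
 * Number of page addresses that fit in one reclaim command output, given
 * the inline output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */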
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

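/*
 * Reclaim every page tracked in @root in optimally sized batches. The
 * timeout is restarted whenever firmware makes progress; if it stops
 * returning pages we give up rather than loop forever.
 */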
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

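/*
 * Reclaim the pages of every tracked function and tear down the trees;
 * warn if any accounting counter is still nonzero afterwards.
 */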
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);
	WARN(dev->priv.peer_pf_pages,
	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.peer_pf_pages);

	return 0;
}

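/* Set up the free list, the page request workqueue and the root xarray. */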
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

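/* Start/stop listening for PAGE_REQUEST events from the event queue. */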
void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

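/*
 * Sleep until *pages drops to zero, e.g. while VFs hand their pages back.
 * The timeout restarts whenever progress is made; in internal-error state
 * the wait is skipped since the pages will be freed manually later.
 */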
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}