1 /*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <linux/platform_device.h>
35 #include "hns_roce_device.h"
36 #include "hns_roce_hem.h"
37 #include "hns_roce_common.h"
38
39 #define HEM_INDEX_BUF BIT(0)
40 #define HEM_INDEX_L0 BIT(1)
41 #define HEM_INDEX_L1 BIT(2)
42 struct hns_roce_hem_index {
43 u64 buf;
44 u64 l0;
45 u64 l1;
46 u32 inited; /* indicate which index is available */
47 };
48
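/* Return true if the HEM table of the given type is accessed through
 * multi-hop (BT based) addressing on this device, false otherwise.
 */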
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
50 {
51 int hop_num = 0;
52
53 switch (type) {
54 case HEM_TYPE_QPC:
55 hop_num = hr_dev->caps.qpc_hop_num;
56 break;
57 case HEM_TYPE_MTPT:
58 hop_num = hr_dev->caps.mpt_hop_num;
59 break;
60 case HEM_TYPE_CQC:
61 hop_num = hr_dev->caps.cqc_hop_num;
62 break;
63 case HEM_TYPE_SRQC:
64 hop_num = hr_dev->caps.srqc_hop_num;
65 break;
66 case HEM_TYPE_SCCC:
67 hop_num = hr_dev->caps.sccc_hop_num;
68 break;
69 case HEM_TYPE_QPC_TIMER:
70 hop_num = hr_dev->caps.qpc_timer_hop_num;
71 break;
72 case HEM_TYPE_CQC_TIMER:
73 hop_num = hr_dev->caps.cqc_timer_hop_num;
74 break;
75 case HEM_TYPE_GMV:
76 hop_num = hr_dev->caps.gmv_hop_num;
77 break;
78 default:
79 return false;
80 }
81
82 return hop_num ? true : false;
83 }
84
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
				    u32 bt_chunk_num, u64 hem_max_num)
87 {
88 u64 start_idx = round_down(hem_idx, bt_chunk_num);
89 u64 check_max_num = start_idx + bt_chunk_num;
90 u64 i;
91
92 for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
93 if (i != hem_idx && hem[i])
94 return false;
95
96 return true;
97 }
98
static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
100 {
101 u64 start_idx = round_down(ba_idx, bt_chunk_num);
102 int i;
103
104 for (i = 0; i < bt_chunk_num; i++)
105 if (i != ba_idx && bt[start_idx + i])
106 return false;
107
108 return true;
109 }
110
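/* Return the number of addressing levels (1 to 3) implied by the table
 * type and hop number, or 0 if the combination is not supported.
 */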
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
112 {
113 if (check_whether_bt_num_3(table_type, hop_num))
114 return 3;
115 else if (check_whether_bt_num_2(table_type, hop_num))
116 return 2;
117 else if (check_whether_bt_num_1(table_type, hop_num))
118 return 1;
119 else
120 return 0;
121 }
122
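/* Fill @mhop with the buffer/BT chunk sizes, the number of L0 BT entries
 * and the hop number configured for the given table type.
 */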
static int get_hem_table_config(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_mhop *mhop,
				u32 type)
126 {
127 struct device *dev = hr_dev->dev;
128
129 switch (type) {
130 case HEM_TYPE_QPC:
131 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
132 + PAGE_SHIFT);
133 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
134 + PAGE_SHIFT);
135 mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
136 mhop->hop_num = hr_dev->caps.qpc_hop_num;
137 break;
138 case HEM_TYPE_MTPT:
139 mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
140 + PAGE_SHIFT);
141 mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
142 + PAGE_SHIFT);
143 mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
144 mhop->hop_num = hr_dev->caps.mpt_hop_num;
145 break;
146 case HEM_TYPE_CQC:
147 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
148 + PAGE_SHIFT);
149 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
150 + PAGE_SHIFT);
151 mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
152 mhop->hop_num = hr_dev->caps.cqc_hop_num;
153 break;
154 case HEM_TYPE_SCCC:
155 mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
156 + PAGE_SHIFT);
157 mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
158 + PAGE_SHIFT);
159 mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
160 mhop->hop_num = hr_dev->caps.sccc_hop_num;
161 break;
162 case HEM_TYPE_QPC_TIMER:
163 mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
164 + PAGE_SHIFT);
165 mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
166 + PAGE_SHIFT);
167 mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
168 mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
169 break;
170 case HEM_TYPE_CQC_TIMER:
171 mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
172 + PAGE_SHIFT);
173 mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
174 + PAGE_SHIFT);
175 mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
176 mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
177 break;
178 case HEM_TYPE_SRQC:
179 mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
180 + PAGE_SHIFT);
181 mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
182 + PAGE_SHIFT);
183 mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
184 mhop->hop_num = hr_dev->caps.srqc_hop_num;
185 break;
186 case HEM_TYPE_GMV:
187 mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
188 PAGE_SHIFT);
189 mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
190 PAGE_SHIFT);
191 mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
192 mhop->hop_num = hr_dev->caps.gmv_hop_num;
193 break;
194 default:
		dev_err(dev, "table %u does not support multi-hop addressing!\n",
196 type);
197 return -EINVAL;
198 }
199
200 return 0;
201 }
202
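/* Work out the multi-hop configuration for @table and, if @obj is given,
 * the L0/L1/L2 indexes of that object inside the base address tables.
 */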
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
206 {
207 struct device *dev = hr_dev->dev;
208 u32 chunk_ba_num;
209 u32 chunk_size;
210 u32 table_idx;
211 u32 bt_num;
212
213 if (get_hem_table_config(hr_dev, mhop, table->type))
214 return -EINVAL;
215
216 if (!obj)
217 return 0;
218
219 /*
220 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
221 * MTT/CQE alloc hem for bt pages.
222 */
223 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
224 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
225 chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
226 mhop->bt_chunk_size;
227 table_idx = *obj / (chunk_size / table->obj_size);
228 switch (bt_num) {
229 case 3:
230 mhop->l2_idx = table_idx & (chunk_ba_num - 1);
231 mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
232 mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
233 break;
234 case 2:
235 mhop->l1_idx = table_idx & (chunk_ba_num - 1);
236 mhop->l0_idx = table_idx / chunk_ba_num;
237 break;
238 case 1:
239 mhop->l0_idx = table_idx;
240 break;
241 default:
		dev_err(dev, "table %u does not support hop_num = %u!\n",
243 table->type, mhop->hop_num);
244 return -EINVAL;
245 }
246 if (mhop->l0_idx >= mhop->ba_l0_num)
247 mhop->l0_idx %= mhop->ba_l0_num;
248
249 return 0;
250 }
251
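/* Allocate a HEM consisting of DMA-coherent chunks that cover @npages
 * pages; on any allocation failure the partially built HEM is freed.
 */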
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
256 {
257 struct hns_roce_hem_chunk *chunk = NULL;
258 struct hns_roce_hem *hem;
259 struct scatterlist *mem;
260 int order;
261 void *buf;
262
263 WARN_ON(gfp_mask & __GFP_HIGHMEM);
264
265 hem = kmalloc(sizeof(*hem),
266 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
267 if (!hem)
268 return NULL;
269
270 INIT_LIST_HEAD(&hem->chunk_list);
271
272 order = get_order(hem_alloc_size);
273
274 while (npages > 0) {
275 if (!chunk) {
276 chunk = kmalloc(sizeof(*chunk),
277 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
278 if (!chunk)
279 goto fail;
280
281 sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
282 chunk->npages = 0;
283 chunk->nsg = 0;
284 memset(chunk->buf, 0, sizeof(chunk->buf));
285 list_add_tail(&chunk->list, &hem->chunk_list);
286 }
287
288 while (1 << order > npages)
289 --order;
290
		/*
		 * Allocate the memory in one go. If that fails, do not fall
		 * back to smaller allocations; return failure directly.
		 */
295 mem = &chunk->mem[chunk->npages];
296 buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
297 &sg_dma_address(mem), gfp_mask);
298 if (!buf)
299 goto fail;
300
301 chunk->buf[chunk->npages] = buf;
302 sg_dma_len(mem) = PAGE_SIZE << order;
303
304 ++chunk->npages;
305 ++chunk->nsg;
306 npages -= 1 << order;
307 }
308
309 return hem;
310
311 fail:
312 hns_roce_free_hem(hr_dev, hem);
313 return NULL;
314 }
315
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
317 {
318 struct hns_roce_hem_chunk *chunk, *tmp;
319 int i;
320
321 if (!hem)
322 return;
323
324 list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
325 for (i = 0; i < chunk->npages; ++i)
326 dma_free_coherent(hr_dev->dev,
327 sg_dma_len(&chunk->mem[i]),
328 chunk->buf[i],
329 sg_dma_address(&chunk->mem[i]));
330 kfree(chunk);
331 }
332
333 kfree(hem);
334 }
335
static int calc_hem_config(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
340 {
341 struct ib_device *ibdev = &hr_dev->ib_dev;
342 unsigned long mhop_obj = obj;
343 u32 l0_idx, l1_idx, l2_idx;
344 u32 chunk_ba_num;
345 u32 bt_num;
346 int ret;
347
348 ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
349 if (ret)
350 return ret;
351
352 l0_idx = mhop->l0_idx;
353 l1_idx = mhop->l1_idx;
354 l2_idx = mhop->l2_idx;
355 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
356 bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
357 switch (bt_num) {
358 case 3:
359 index->l1 = l0_idx * chunk_ba_num + l1_idx;
360 index->l0 = l0_idx;
361 index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
362 l1_idx * chunk_ba_num + l2_idx;
363 break;
364 case 2:
365 index->l0 = l0_idx;
366 index->buf = l0_idx * chunk_ba_num + l1_idx;
367 break;
368 case 1:
369 index->buf = l0_idx;
370 break;
371 default:
		ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
373 table->type, mhop->hop_num);
374 return -EINVAL;
375 }
376
377 if (unlikely(index->buf >= table->num_hem)) {
		ibdev_err(ibdev, "table %u exceeds hem limit idx %llu, max %lu!\n",
379 table->type, index->buf, table->num_hem);
380 return -EINVAL;
381 }
382
383 return 0;
384 }
385
static void free_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
390 {
391 u32 bt_size = mhop->bt_chunk_size;
392 struct device *dev = hr_dev->dev;
393
394 if (index->inited & HEM_INDEX_BUF) {
395 hns_roce_free_hem(hr_dev, table->hem[index->buf]);
396 table->hem[index->buf] = NULL;
397 }
398
399 if (index->inited & HEM_INDEX_L1) {
400 dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
401 table->bt_l1_dma_addr[index->l1]);
402 table->bt_l1[index->l1] = NULL;
403 }
404
405 if (index->inited & HEM_INDEX_L0) {
406 dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
407 table->bt_l0_dma_addr[index->l0]);
408 table->bt_l0[index->l0] = NULL;
409 }
410 }
411
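/* Allocate the L0/L1 BT pages and the HEM buffer needed to back one
 * object of a multi-hop table, and link the new BT entries together.
 */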
static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
416 {
417 u32 bt_size = mhop->bt_chunk_size;
418 struct device *dev = hr_dev->dev;
419 struct hns_roce_hem_iter iter;
420 gfp_t flag;
421 u64 bt_ba;
422 u32 size;
423 int ret;
424
425 /* alloc L1 BA's chunk */
426 if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
427 check_whether_bt_num_2(table->type, mhop->hop_num)) &&
428 !table->bt_l0[index->l0]) {
429 table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
430 &table->bt_l0_dma_addr[index->l0],
431 GFP_KERNEL);
432 if (!table->bt_l0[index->l0]) {
433 ret = -ENOMEM;
434 goto out;
435 }
436 index->inited |= HEM_INDEX_L0;
437 }
438
439 /* alloc L2 BA's chunk */
440 if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
441 !table->bt_l1[index->l1]) {
442 table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
443 &table->bt_l1_dma_addr[index->l1],
444 GFP_KERNEL);
445 if (!table->bt_l1[index->l1]) {
446 ret = -ENOMEM;
447 goto err_alloc_hem;
448 }
449 index->inited |= HEM_INDEX_L1;
450 *(table->bt_l0[index->l0] + mhop->l1_idx) =
451 table->bt_l1_dma_addr[index->l1];
452 }
453
454 /*
455 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
456 * alloc bt space chunk for MTT/CQE.
457 */
458 size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
459 flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
460 table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
461 size, flag);
462 if (!table->hem[index->buf]) {
463 ret = -ENOMEM;
464 goto err_alloc_hem;
465 }
466
467 index->inited |= HEM_INDEX_BUF;
468 hns_roce_hem_first(table->hem[index->buf], &iter);
469 bt_ba = hns_roce_hem_addr(&iter);
470 if (table->type < HEM_TYPE_MTT) {
471 if (mhop->hop_num == 2)
472 *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
473 else if (mhop->hop_num == 1)
474 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
475 } else if (mhop->hop_num == 2) {
476 *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
477 }
478
479 return 0;
480 err_alloc_hem:
481 free_mhop_hem(hr_dev, table, mhop, index);
482 out:
483 return ret;
484 }
485
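/* Program the base addresses of the newly allocated BTs/HEM into the
 * hardware, one set_hem() call per initialized level.
 */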
static int set_mhop_hem(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj,
			struct hns_roce_hem_mhop *mhop,
			struct hns_roce_hem_index *index)
490 {
491 struct ib_device *ibdev = &hr_dev->ib_dev;
492 int step_idx;
493 int ret = 0;
494
495 if (index->inited & HEM_INDEX_L0) {
496 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
497 if (ret) {
498 ibdev_err(ibdev, "set HEM step 0 failed!\n");
499 goto out;
500 }
501 }
502
503 if (index->inited & HEM_INDEX_L1) {
504 ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
505 if (ret) {
506 ibdev_err(ibdev, "set HEM step 1 failed!\n");
507 goto out;
508 }
509 }
510
511 if (index->inited & HEM_INDEX_BUF) {
512 if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
513 step_idx = 0;
514 else
515 step_idx = mhop->hop_num;
516 ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
517 if (ret)
518 ibdev_err(ibdev, "set HEM step last failed!\n");
519 }
520 out:
521 return ret;
522 }
523
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
527 {
528 struct ib_device *ibdev = &hr_dev->ib_dev;
529 struct hns_roce_hem_index index = {};
530 struct hns_roce_hem_mhop mhop = {};
531 int ret;
532
533 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
534 if (ret) {
535 ibdev_err(ibdev, "calc hem config failed!\n");
536 return ret;
537 }
538
539 mutex_lock(&table->mutex);
540 if (table->hem[index.buf]) {
541 refcount_inc(&table->hem[index.buf]->refcount);
542 goto out;
543 }
544
545 ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
546 if (ret) {
547 ibdev_err(ibdev, "alloc mhop hem failed!\n");
548 goto out;
549 }
550
551 /* set HEM base address to hardware */
552 if (table->type < HEM_TYPE_MTT) {
553 ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
554 if (ret) {
555 ibdev_err(ibdev, "set HEM address to HW failed!\n");
556 goto err_alloc;
557 }
558 }
559
560 refcount_set(&table->hem[index.buf]->refcount, 1);
561 goto out;
562
563 err_alloc:
564 free_mhop_hem(hr_dev, table, &mhop, &index);
565 out:
566 mutex_unlock(&table->mutex);
567 return ret;
568 }
569
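/* Get a reference on the HEM chunk backing @obj, allocating the chunk
 * and programming its base address into hardware if it does not exist yet.
 */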
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
572 {
573 struct device *dev = hr_dev->dev;
574 unsigned long i;
575 int ret = 0;
576
577 if (hns_roce_check_whether_mhop(hr_dev, table->type))
578 return hns_roce_table_mhop_get(hr_dev, table, obj);
579
580 i = obj / (table->table_chunk_size / table->obj_size);
581
582 mutex_lock(&table->mutex);
583
584 if (table->hem[i]) {
585 refcount_inc(&table->hem[i]->refcount);
586 goto out;
587 }
588
589 table->hem[i] = hns_roce_alloc_hem(hr_dev,
590 table->table_chunk_size >> PAGE_SHIFT,
591 table->table_chunk_size,
592 (table->lowmem ? GFP_KERNEL :
593 GFP_HIGHUSER) | __GFP_NOWARN);
594 if (!table->hem[i]) {
595 ret = -ENOMEM;
596 goto out;
597 }
598
599 /* Set HEM base address(128K/page, pa) to Hardware */
600 ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
601 if (ret) {
602 hns_roce_free_hem(hr_dev, table->hem[i]);
603 table->hem[i] = NULL;
604 dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
605 ret);
606 goto out;
607 }
608
609 refcount_set(&table->hem[i]->refcount, 1);
610 out:
611 mutex_unlock(&table->mutex);
612 return ret;
613 }
614
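/* Decide which BT levels can be released along with the HEM buffer and,
 * for context tables, clear the corresponding base addresses in hardware.
 */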
static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
619 {
620 struct ib_device *ibdev = &hr_dev->ib_dev;
621 u32 hop_num = mhop->hop_num;
622 u32 chunk_ba_num;
623 int step_idx;
624
625 index->inited = HEM_INDEX_BUF;
626 chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
627 if (check_whether_bt_num_2(table->type, hop_num)) {
628 if (hns_roce_check_hem_null(table->hem, index->buf,
629 chunk_ba_num, table->num_hem))
630 index->inited |= HEM_INDEX_L0;
631 } else if (check_whether_bt_num_3(table->type, hop_num)) {
632 if (hns_roce_check_hem_null(table->hem, index->buf,
633 chunk_ba_num, table->num_hem)) {
634 index->inited |= HEM_INDEX_L1;
635 if (hns_roce_check_bt_null(table->bt_l1, index->l1,
636 chunk_ba_num))
637 index->inited |= HEM_INDEX_L0;
638 }
639 }
640
641 if (table->type < HEM_TYPE_MTT) {
642 if (hop_num == HNS_ROCE_HOP_NUM_0)
643 step_idx = 0;
644 else
645 step_idx = hop_num;
646
647 if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
648 ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
649
650 if (index->inited & HEM_INDEX_L1)
651 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
652 ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
653
654 if (index->inited & HEM_INDEX_L0)
655 if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
656 ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
657 }
658 }
659
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
664 {
665 struct ib_device *ibdev = &hr_dev->ib_dev;
666 struct hns_roce_hem_index index = {};
667 struct hns_roce_hem_mhop mhop = {};
668 int ret;
669
670 ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
671 if (ret) {
672 ibdev_err(ibdev, "calc hem config failed!\n");
673 return;
674 }
675
676 if (!check_refcount)
677 mutex_lock(&table->mutex);
678 else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
679 &table->mutex))
680 return;
681
682 clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
683 free_mhop_hem(hr_dev, table, &mhop, &index);
684
685 mutex_unlock(&table->mutex);
686 }
687
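/* Drop a reference on the HEM chunk backing @obj and free it (after
 * clearing its base address in hardware) when the refcount hits zero.
 */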
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
690 {
691 struct device *dev = hr_dev->dev;
692 unsigned long i;
693
694 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
695 hns_roce_table_mhop_put(hr_dev, table, obj, 1);
696 return;
697 }
698
699 i = obj / (table->table_chunk_size / table->obj_size);
700
701 if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
702 &table->mutex))
703 return;
704
705 if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT))
706 dev_warn(dev, "failed to clear HEM base address.\n");
707
708 hns_roce_free_hem(hr_dev, table->hem[i]);
709 table->hem[i] = NULL;
710
711 mutex_unlock(&table->mutex);
712 }
713
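/* Return the kernel virtual address of object @obj inside a lowmem HEM
 * table and, optionally, its DMA address via @dma_handle.
 */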
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
717 {
718 struct hns_roce_hem_chunk *chunk;
719 struct hns_roce_hem_mhop mhop;
720 struct hns_roce_hem *hem;
721 unsigned long mhop_obj = obj;
722 unsigned long obj_per_chunk;
723 unsigned long idx_offset;
724 int offset, dma_offset;
725 void *addr = NULL;
726 u32 hem_idx = 0;
727 int length;
728 int i, j;
729
730 if (!table->lowmem)
731 return NULL;
732
733 mutex_lock(&table->mutex);
734
735 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
736 obj_per_chunk = table->table_chunk_size / table->obj_size;
737 hem = table->hem[obj / obj_per_chunk];
738 idx_offset = obj % obj_per_chunk;
739 dma_offset = offset = idx_offset * table->obj_size;
740 } else {
741 u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
742
743 if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
744 goto out;
745 /* mtt mhop */
746 i = mhop.l0_idx;
747 j = mhop.l1_idx;
748 if (mhop.hop_num == 2)
749 hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
750 else if (mhop.hop_num == 1 ||
751 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
752 hem_idx = i;
753
754 hem = table->hem[hem_idx];
755 dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
756 if (mhop.hop_num == 2)
757 dma_offset = offset = 0;
758 }
759
760 if (!hem)
761 goto out;
762
763 list_for_each_entry(chunk, &hem->chunk_list, list) {
764 for (i = 0; i < chunk->npages; ++i) {
765 length = sg_dma_len(&chunk->mem[i]);
766 if (dma_handle && dma_offset >= 0) {
767 if (length > (u32)dma_offset)
768 *dma_handle = sg_dma_address(
769 &chunk->mem[i]) + dma_offset;
770 dma_offset -= length;
771 }
772
773 if (length > (u32)offset) {
774 addr = chunk->buf[i] + offset;
775 goto out;
776 }
777 offset -= length;
778 }
779 }
780
781 out:
782 mutex_unlock(&table->mutex);
783 return addr;
784 }
785
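/* Allocate the bookkeeping arrays for @table: the hem pointer array and,
 * for multi-hop tables, the L0/L1 BT pointer and DMA address arrays.
 */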
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
790 {
791 unsigned long obj_per_chunk;
792 unsigned long num_hem;
793
794 if (!hns_roce_check_whether_mhop(hr_dev, type)) {
795 table->table_chunk_size = hr_dev->caps.chunk_sz;
796 obj_per_chunk = table->table_chunk_size / obj_size;
797 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
798
799 table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
800 if (!table->hem)
801 return -ENOMEM;
802 } else {
803 struct hns_roce_hem_mhop mhop = {};
804 unsigned long buf_chunk_size;
805 unsigned long bt_chunk_size;
806 unsigned long bt_chunk_num;
807 unsigned long num_bt_l0;
808 u32 hop_num;
809
810 if (get_hem_table_config(hr_dev, &mhop, type))
811 return -EINVAL;
812
813 buf_chunk_size = mhop.buf_chunk_size;
814 bt_chunk_size = mhop.bt_chunk_size;
815 num_bt_l0 = mhop.ba_l0_num;
816 hop_num = mhop.hop_num;
817
818 obj_per_chunk = buf_chunk_size / obj_size;
819 num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
820 bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
821
822 if (type >= HEM_TYPE_MTT)
823 num_bt_l0 = bt_chunk_num;
824
825 table->hem = kcalloc(num_hem, sizeof(*table->hem),
826 GFP_KERNEL);
827 if (!table->hem)
828 goto err_kcalloc_hem_buf;
829
830 if (check_whether_bt_num_3(type, hop_num)) {
831 unsigned long num_bt_l1;
832
833 num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
834 table->bt_l1 = kcalloc(num_bt_l1,
835 sizeof(*table->bt_l1),
836 GFP_KERNEL);
837 if (!table->bt_l1)
838 goto err_kcalloc_bt_l1;
839
840 table->bt_l1_dma_addr = kcalloc(num_bt_l1,
841 sizeof(*table->bt_l1_dma_addr),
842 GFP_KERNEL);
843
844 if (!table->bt_l1_dma_addr)
845 goto err_kcalloc_l1_dma;
846 }
847
848 if (check_whether_bt_num_2(type, hop_num) ||
849 check_whether_bt_num_3(type, hop_num)) {
850 table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
851 GFP_KERNEL);
852 if (!table->bt_l0)
853 goto err_kcalloc_bt_l0;
854
855 table->bt_l0_dma_addr = kcalloc(num_bt_l0,
856 sizeof(*table->bt_l0_dma_addr),
857 GFP_KERNEL);
858 if (!table->bt_l0_dma_addr)
859 goto err_kcalloc_l0_dma;
860 }
861 }
862
863 table->type = type;
864 table->num_hem = num_hem;
865 table->obj_size = obj_size;
866 table->lowmem = use_lowmem;
867 mutex_init(&table->mutex);
868
869 return 0;
870
871 err_kcalloc_l0_dma:
872 kfree(table->bt_l0);
873 table->bt_l0 = NULL;
874
875 err_kcalloc_bt_l0:
876 kfree(table->bt_l1_dma_addr);
877 table->bt_l1_dma_addr = NULL;
878
879 err_kcalloc_l1_dma:
880 kfree(table->bt_l1);
881 table->bt_l1 = NULL;
882
883 err_kcalloc_bt_l1:
884 kfree(table->hem);
885 table->hem = NULL;
886
887 err_kcalloc_hem_buf:
888 return -ENOMEM;
889 }
890
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
893 {
894 struct hns_roce_hem_mhop mhop;
895 u32 buf_chunk_size;
896 u64 obj;
897 int i;
898
899 if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
900 return;
901 buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
902 mhop.bt_chunk_size;
903
904 for (i = 0; i < table->num_hem; ++i) {
905 obj = i * buf_chunk_size / table->obj_size;
906 if (table->hem[i])
907 hns_roce_table_mhop_put(hr_dev, table, obj, 0);
908 }
909
910 kfree(table->hem);
911 table->hem = NULL;
912 kfree(table->bt_l1);
913 table->bt_l1 = NULL;
914 kfree(table->bt_l1_dma_addr);
915 table->bt_l1_dma_addr = NULL;
916 kfree(table->bt_l0);
917 table->bt_l0 = NULL;
918 kfree(table->bt_l0_dma_addr);
919 table->bt_l0_dma_addr = NULL;
920 }
921
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
924 {
925 struct device *dev = hr_dev->dev;
926 unsigned long i;
927
928 if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
929 hns_roce_cleanup_mhop_hem_table(hr_dev, table);
930 return;
931 }
932
933 for (i = 0; i < table->num_hem; ++i)
934 if (table->hem[i]) {
935 if (hr_dev->hw->clear_hem(hr_dev, table,
936 i * table->table_chunk_size / table->obj_size, 0))
937 dev_err(dev, "Clear HEM base address failed.\n");
938
939 hns_roce_free_hem(hr_dev, table->hem[i]);
940 }
941
942 kfree(table->hem);
943 }
944
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
946 {
947 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
948 hns_roce_cleanup_hem_table(hr_dev,
949 &hr_dev->srq_table.table);
950 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
951 if (hr_dev->caps.qpc_timer_entry_sz)
952 hns_roce_cleanup_hem_table(hr_dev,
953 &hr_dev->qpc_timer_table);
954 if (hr_dev->caps.cqc_timer_entry_sz)
955 hns_roce_cleanup_hem_table(hr_dev,
956 &hr_dev->cqc_timer_table);
957 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
958 hns_roce_cleanup_hem_table(hr_dev,
959 &hr_dev->qp_table.sccc_table);
960 if (hr_dev->caps.trrl_entry_sz)
961 hns_roce_cleanup_hem_table(hr_dev,
962 &hr_dev->qp_table.trrl_table);
963
964 if (hr_dev->caps.gmv_entry_sz)
965 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
966
967 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
968 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
969 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
970 }
971
972 struct hns_roce_hem_item {
973 struct list_head list; /* link all hems in the same bt level */
974 struct list_head sibling; /* link all hems in last hop for mtt */
975 void *addr;
976 dma_addr_t dma_addr;
977 size_t count; /* max ba numbers */
978 int start; /* start buf offset in this hem */
979 int end; /* end buf offset in this hem */
980 };
981
982 /* All HEM items are linked in a tree structure */
983 struct hns_roce_hem_head {
984 struct list_head branch[HNS_ROCE_MAX_BT_REGION];
985 struct list_head root;
986 struct list_head leaf;
987 };
988
989 static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
		    bool exist_bt, int bt_level)
992 {
993 struct hns_roce_hem_item *hem;
994
995 hem = kzalloc(sizeof(*hem), GFP_KERNEL);
996 if (!hem)
997 return NULL;
998
999 if (exist_bt) {
1000 hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
1001 &hem->dma_addr, GFP_KERNEL);
1002 if (!hem->addr) {
1003 kfree(hem);
1004 return NULL;
1005 }
1006 }
1007
1008 hem->count = count;
1009 hem->start = start;
1010 hem->end = end;
1011 INIT_LIST_HEAD(&hem->list);
1012 INIT_LIST_HEAD(&hem->sibling);
1013
1014 return hem;
1015 }
1016
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_item *hem, bool exist_bt)
1019 {
1020 if (exist_bt)
1021 dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
1022 hem->addr, hem->dma_addr);
1023 kfree(hem);
1024 }
1025
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
1028 {
1029 struct hns_roce_hem_item *hem, *temp_hem;
1030
1031 list_for_each_entry_safe(hem, temp_hem, head, list) {
1032 list_del(&hem->list);
1033 hem_list_free_item(hr_dev, hem, exist_bt);
1034 }
1035 }
1036
static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
			     u64 table_addr)
1039 {
1040 *(u64 *)(base_addr) = table_addr;
1041 }
1042
1043 /* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_item *hem, void *cpu_addr,
			       u64 phy_addr)
1047 {
1048 hem->addr = cpu_addr;
1049 hem->dma_addr = (dma_addr_t)phy_addr;
1050 }
1051
static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
					     int offset)
1054 {
1055 return (hem->start <= offset && offset <= hem->end);
1056 }
1057
static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
						       int page_offset)
1060 {
1061 struct hns_roce_hem_item *hem, *temp_hem;
1062 struct hns_roce_hem_item *found = NULL;
1063
1064 list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
1065 if (hem_list_page_is_in_range(hem, page_offset)) {
1066 found = hem;
1067 break;
1068 }
1069 }
1070
1071 return found;
1072 }
1073
static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
1075 {
1076 /*
1077 * hopnum base address table levels
1078 * 0 L0(buf)
1079 * 1 L0 -> buf
1080 * 2 L0 -> L1 -> buf
1081 * 3 L0 -> L1 -> L2 -> buf
1082 */
1083 return bt_level >= (hopnum ? hopnum - 1 : hopnum);
1084 }
1085
1086 /*
1087 * calc base address entries num
 * @hopnum: num of multihop addressing
1089 * @bt_level: base address table level
1090 * @unit: ba entries per bt page
1091 */
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
1093 {
1094 u32 step;
1095 int max;
1096 int i;
1097
1098 if (hopnum <= bt_level)
1099 return 0;
1100 /*
1101 * hopnum bt_level range
1102 * 1 0 unit
1103 * ------------
1104 * 2 0 unit * unit
1105 * 2 1 unit
1106 * ------------
1107 * 3 0 unit * unit * unit
1108 * 3 1 unit * unit
1109 * 3 2 unit
1110 */
1111 step = 1;
1112 max = hopnum - bt_level;
1113 for (i = 0; i < max; i++)
1114 step = step * unit;
1115
1116 return step;
1117 }
1118
1119 /*
1120 * calc the root ba entries which could cover all regions
1121 * @regions: buf region array
1122 * @region_cnt: array size of @regions
1123 * @unit: ba entries per bt page
1124 */
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit)
1127 {
1128 struct hns_roce_buf_region *r;
1129 int total = 0;
1130 int step;
1131 int i;
1132
1133 for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
1135 if (r->hopnum > 1) {
1136 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1137 if (step > 0)
1138 total += (r->count + step - 1) / step;
1139 } else {
1140 total += r->count;
1141 }
1142 }
1143
1144 return total;
1145 }
1146
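/* Allocate the intermediate (L1 and lower) BTs that cover @offset in
 * region @r, link each new BT into its parent, and queue bottom-level
 * BTs on @btm_bt.
 */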
static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
				 const struct hns_roce_buf_region *r, int unit,
				 int offset, struct list_head *mid_bt,
				 struct list_head *btm_bt)
1151 {
1152 struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
1153 struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
1154 struct hns_roce_hem_item *cur, *pre;
1155 const int hopnum = r->hopnum;
1156 int start_aligned;
1157 int distance;
1158 int ret = 0;
1159 int max_ofs;
1160 int level;
1161 u32 step;
1162 int end;
1163
1164 if (hopnum <= 1)
1165 return 0;
1166
1167 if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
1168 dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
1169 return -EINVAL;
1170 }
1171
1172 if (offset < r->offset) {
1173 dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
1174 offset, r->offset);
1175 return -EINVAL;
1176 }
1177
1178 distance = offset - r->offset;
1179 max_ofs = r->offset + r->count - 1;
1180 for (level = 0; level < hopnum; level++)
1181 INIT_LIST_HEAD(&temp_list[level]);
1182
1183 /* config L1 bt to last bt and link them to corresponding parent */
1184 for (level = 1; level < hopnum; level++) {
1185 cur = hem_list_search_item(&mid_bt[level], offset);
1186 if (cur) {
1187 hem_ptrs[level] = cur;
1188 continue;
1189 }
1190
1191 step = hem_list_calc_ba_range(hopnum, level, unit);
1192 if (step < 1) {
1193 ret = -EINVAL;
1194 goto err_exit;
1195 }
1196
1197 start_aligned = (distance / step) * step + r->offset;
1198 end = min_t(int, start_aligned + step - 1, max_ofs);
1199 cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
1200 true, level);
1201 if (!cur) {
1202 ret = -ENOMEM;
1203 goto err_exit;
1204 }
1205 hem_ptrs[level] = cur;
1206 list_add(&cur->list, &temp_list[level]);
1207 if (hem_list_is_bottom_bt(hopnum, level))
1208 list_add(&cur->sibling, &temp_list[0]);
1209
1210 /* link bt to parent bt */
1211 if (level > 1) {
1212 pre = hem_ptrs[level - 1];
1213 step = (cur->start - pre->start) / step * BA_BYTE_LEN;
1214 hem_list_link_bt(hr_dev, pre->addr + step,
1215 cur->dma_addr);
1216 }
1217 }
1218
1219 list_splice(&temp_list[0], btm_bt);
1220 for (level = 1; level < hopnum; level++)
1221 list_splice(&temp_list[level], &mid_bt[level]);
1222
1223 return 0;
1224
1225 err_exit:
1226 for (level = 1; level < hopnum; level++)
1227 hem_list_free_all(hr_dev, &temp_list[level], true);
1228
1229 return ret;
1230 }
1231
1232 static struct hns_roce_hem_item *
alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
	       const struct hns_roce_buf_region *regions, int region_cnt)
1235 {
1236 const struct hns_roce_buf_region *r;
1237 struct hns_roce_hem_item *hem;
1238 int ba_num;
1239 int offset;
1240
1241 ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
1242 if (ba_num < 1)
1243 return ERR_PTR(-ENOMEM);
1244
1245 if (ba_num > unit)
1246 return ERR_PTR(-ENOBUFS);
1247
1248 offset = regions[0].offset;
	/* point to the last region */
	r = &regions[region_cnt - 1];
1251 hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
1252 ba_num, true, 0);
1253 if (!hem)
1254 return ERR_PTR(-ENOMEM);
1255
1256 *max_ba_num = ba_num;
1257
1258 return hem;
1259 }
1260
static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
			      u64 phy_base, const struct hns_roce_buf_region *r,
			      struct list_head *branch_head,
			      struct list_head *leaf_head)
1265 {
1266 struct hns_roce_hem_item *hem;
1267
1268 hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
1269 r->count, false, 0);
1270 if (!hem)
1271 return -ENOMEM;
1272
1273 hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
1274 list_add(&hem->list, branch_head);
1275 list_add(&hem->sibling, leaf_head);
1276
1277 return r->count;
1278 }
1279
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
			   int unit, const struct hns_roce_buf_region *r,
			   const struct list_head *branch_head)
1283 {
1284 struct hns_roce_hem_item *hem, *temp_hem;
1285 int total = 0;
1286 int offset;
1287 int step;
1288
1289 step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1290 if (step < 1)
1291 return -EINVAL;
1292
1293 /* if exist mid bt, link L1 to L0 */
1294 list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
1295 offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
1296 hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
1297 total++;
1298 }
1299
1300 return total;
1301 }
1302
1303 static int
setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
	       int unit, int max_ba_num, struct hns_roce_hem_head *head,
	       const struct hns_roce_buf_region *regions, int region_cnt)
1307 {
1308 const struct hns_roce_buf_region *r;
1309 struct hns_roce_hem_item *root_hem;
1310 void *cpu_base;
1311 u64 phy_base;
1312 int i, total;
1313 int ret;
1314
1315 root_hem = list_first_entry(&head->root,
1316 struct hns_roce_hem_item, list);
1317 if (!root_hem)
1318 return -ENOMEM;
1319
1320 total = 0;
1321 for (i = 0; i < region_cnt && total < max_ba_num; i++) {
		r = &regions[i];
1323 if (!r->count)
1324 continue;
1325
		/* all regions' mid[x][0] tables share the root_bt's trunk */
1327 cpu_base = root_hem->addr + total * BA_BYTE_LEN;
1328 phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
1329
		/* if hopnum is 0 or 1, cut a new fake hem from the root bt
		 * whose address is shared with all regions.
		 */
1333 if (hem_list_is_bottom_bt(r->hopnum, 0))
1334 ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
1335 &head->branch[i], &head->leaf);
1336 else
1337 ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
1338 &hem_list->mid_bt[i][1]);
1339
1340 if (ret < 0)
1341 return ret;
1342
1343 total += ret;
1344 }
1345
1346 list_splice(&head->leaf, &hem_list->btm_bt);
1347 list_splice(&head->root, &hem_list->root_bt);
1348 for (i = 0; i < region_cnt; i++)
1349 list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
1350
1351 return 0;
1352 }
1353
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list, int unit,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt)
1358 {
1359 struct hns_roce_hem_item *root_hem;
1360 struct hns_roce_hem_head head;
1361 int max_ba_num;
1362 int ret;
1363 int i;
1364
1365 root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
1366 if (root_hem)
1367 return 0;
1368
1369 max_ba_num = 0;
1370 root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
1371 region_cnt);
1372 if (IS_ERR(root_hem))
1373 return PTR_ERR(root_hem);
1374
1375 /* List head for storing all allocated HEM items */
1376 INIT_LIST_HEAD(&head.root);
1377 INIT_LIST_HEAD(&head.leaf);
1378 for (i = 0; i < region_cnt; i++)
1379 INIT_LIST_HEAD(&head.branch[i]);
1380
1381 hem_list->root_ba = root_hem->dma_addr;
1382 list_add(&root_hem->list, &head.root);
1383 ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
1384 region_cnt);
1385 if (ret) {
1386 for (i = 0; i < region_cnt; i++)
1387 hem_list_free_all(hr_dev, &head.branch[i], false);
1388
1389 hem_list_free_all(hr_dev, &head.root, true);
1390 }
1391
1392 return ret;
1393 }
1394
1395 /* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt, unsigned int bt_pg_shift)
1400 {
1401 const struct hns_roce_buf_region *r;
1402 int ofs, end;
1403 int unit;
1404 int ret;
1405 int i;
1406
1407 if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
1409 region_cnt);
1410 return -EINVAL;
1411 }
1412
1413 unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
1414 for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
1416 if (!r->count)
1417 continue;
1418
1419 end = r->offset + r->count;
1420 for (ofs = r->offset; ofs < end; ofs += unit) {
1421 ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
1422 hem_list->mid_bt[i],
1423 &hem_list->btm_bt);
1424 if (ret) {
1425 dev_err(hr_dev->dev,
1426 "alloc hem trunk fail ret=%d!\n", ret);
1427 goto err_alloc;
1428 }
1429 }
1430 }
1431
1432 ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
1433 region_cnt);
1434 if (ret)
1435 dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
1436 else
1437 return 0;
1438
1439 err_alloc:
1440 hns_roce_hem_list_release(hr_dev, hem_list);
1441
1442 return ret;
1443 }
1444
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list)
1447 {
1448 int i, j;
1449
1450 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1451 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1452 hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
1453 j != 0);
1454
1455 hem_list_free_all(hr_dev, &hem_list->root_bt, true);
1456 INIT_LIST_HEAD(&hem_list->btm_bt);
1457 hem_list->root_ba = 0;
1458 }
1459
void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
1461 {
1462 int i, j;
1463
1464 INIT_LIST_HEAD(&hem_list->root_bt);
1465 INIT_LIST_HEAD(&hem_list->btm_bt);
1466 for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1467 for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1468 INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
1469 }
1470
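/* Look up the bottom-level BT covering @offset and return its kernel
 * address at that offset; also report how many BA entries remain in that
 * BT via @mtt_cnt and the matching DMA address via @phy_addr.
 */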
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr)
1474 {
1475 struct list_head *head = &hem_list->btm_bt;
1476 struct hns_roce_hem_item *hem, *temp_hem;
1477 void *cpu_base = NULL;
1478 u64 phy_base = 0;
1479 int nr = 0;
1480
1481 list_for_each_entry_safe(hem, temp_hem, head, sibling) {
1482 if (hem_list_page_is_in_range(hem, offset)) {
1483 nr = offset - hem->start;
1484 cpu_base = hem->addr + nr * BA_BYTE_LEN;
1485 phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
1486 nr = hem->end + 1 - offset;
1487 break;
1488 }
1489 }
1490
1491 if (mtt_cnt)
1492 *mtt_cnt = nr;
1493
1494 if (phy_addr)
1495 *phy_addr = phy_base;
1496
1497 return cpu_base;
1498 }
1499