/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define DMA_ADDR_T_SHIFT	12
#define BT_BA_SHIFT		32

#define HEM_INDEX_BUF		BIT(0)
#define HEM_INDEX_L0		BIT(1)
#define HEM_INDEX_L1		BIT(2)
struct hns_roce_hem_index {
	u64 buf;
	u64 l0;
	u64 l1;
	u32 inited; /* indicate which index is available */
};

bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	int hop_num = 0;

	switch (type) {
	case HEM_TYPE_QPC:
		hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SRQC:
		hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	default:
		return false;
	}

	return hop_num ? true : false;
}

static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
				    u32 bt_chunk_num, u64 hem_max_num)
{
	u64 start_idx = round_down(hem_idx, bt_chunk_num);
	u64 check_max_num = start_idx + bt_chunk_num;
	u64 i;

	for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
		if (i != hem_idx && hem[i])
			return false;

	return true;
}

static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
{
	u64 start_idx = round_down(ba_idx, bt_chunk_num);
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (i != ba_idx && bt[start_idx + i])
			return false;

	return true;
}

static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}

static int get_hem_table_config(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_mhop *mhop,
				u32 type)
{
	struct device *dev = hr_dev->dev;

	switch (type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
		mhop->hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	default:
		dev_err(dev, "table %u does not support multi-hop addressing!\n",
			type);
		return -EINVAL;
	}

	return 0;
}

int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 table_idx;
	u32 bt_num;
	u32 chunk_size;

	if (get_hem_table_config(hr_dev, mhop, table->type))
		return -EINVAL;

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC/SCCC tables allocate HEM for the buffer pages
	 * themselves; MTT/CQE tables allocate HEM for the BT pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
		     (chunk_size / table->obj_size);
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "table %u does not support hop_num = %u!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}

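/*
 * Illustrative walk-through of the index split above (all values assumed,
 * not taken from any particular device): with bt_chunk_size = 4 KB,
 * chunk_ba_num = 4096 / BA_BYTE_LEN = 512. For a 3-hop table and
 * table_idx = 74565:
 *
 *	l2_idx = 74565 & 511         = 325
 *	l1_idx = (74565 / 512) & 511 = 145
 *	l0_idx = 74565 / 512 / 512   = 0
 *
 * i.e. entry 0 of the L0 table points to an L1 table, entry 145 of that
 * L1 table points to an L2 table, and entry 325 of the L2 table addresses
 * the buffer chunk holding the object.
 */
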
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Allocate each block in one shot. If that fails, do not
		 * fall back to smaller allocations; return failure directly.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}

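/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	struct hns_roce_hem *hem;
 *
 *	hem = hns_roce_alloc_hem(hr_dev, chunk_size >> PAGE_SHIFT,
 *				 chunk_size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!hem)
 *		return -ENOMEM;
 *	...
 *	hns_roce_free_hem(hr_dev, hem);
 *
 * hns_roce_free_hem() walks the chunk list and releases every
 * DMA-coherent block, so it is also safe on a partially built hem,
 * which is exactly what the fail path above relies on.
 */
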
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
					  sg_dma_len(&chunk->mem[i]),
					  chunk->buf[i],
					  sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}

static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	long end;
	unsigned long flags;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	__le32 bt_cmd_val[2];
	__le32 bt_cmd_h = 0;
	__le32 bt_cmd_l;
	u64 bt_ba;
	int ret = 0;

	/* Find the HEM (Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
	case HEM_TYPE_MTPT:
	case HEM_TYPE_CQC:
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
		break;
	default:
		return ret;
	}

	roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate over only the first chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = HW_SYNC_TIMEOUT_MSECS;
		while (end > 0) {
			if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
				break;

			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
			end -= HW_SYNC_SLEEP_TIME_INTERVAL;
		}

		if (end <= 0) {
			dev_err(dev, "write bt_cmd failed, hw_sync is not zero.\n");
			spin_unlock_irqrestore(lock, flags);
			return -EBUSY;
		}

		/* word 0 carries the low 32 bits of the shifted BA; word 1
		 * carries the command fields plus the remaining high bits.
		 */
		bt_cmd_l = cpu_to_le32(bt_ba);
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}

static int calc_hem_config(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long mhop_obj = obj;
	u32 l0_idx, l1_idx, l2_idx;
	u32 chunk_ba_num;
	u32 bt_num;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
	if (ret)
		return ret;

	l0_idx = mhop->l0_idx;
	l1_idx = mhop->l1_idx;
	l2_idx = mhop->l2_idx;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	switch (bt_num) {
	case 3:
		index->l1 = l0_idx * chunk_ba_num + l1_idx;
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
			     l1_idx * chunk_ba_num + l2_idx;
		break;
	case 2:
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num + l1_idx;
		break;
	case 1:
		index->buf = l0_idx;
		break;
	default:
		ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
			  table->type, mhop->hop_num);
		return -EINVAL;
	}

	if (unlikely(index->buf >= table->num_hem)) {
		ibdev_err(ibdev, "table %u exceeds HEM limit, idx %llu, max %lu!\n",
			  table->type, index->buf, table->num_hem);
		return -EINVAL;
	}

	return 0;
}

static void free_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;

	if (index->inited & HEM_INDEX_BUF) {
		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
		table->hem[index->buf] = NULL;
	}

	if (index->inited & HEM_INDEX_L1) {
		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
				  table->bt_l1_dma_addr[index->l1]);
		table->bt_l1[index->l1] = NULL;
	}

	if (index->inited & HEM_INDEX_L0) {
		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
				  table->bt_l0_dma_addr[index->l0]);
		table->bt_l0[index->l0] = NULL;
	}
}

static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	gfp_t flag;
	u64 bt_ba;
	u32 size;
	int ret;

	/* alloc the L0 BT chunk, which holds the L1 BAs */
	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
	     !table->bt_l0[index->l0]) {
		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l0_dma_addr[index->l0],
					    GFP_KERNEL);
		if (!table->bt_l0[index->l0]) {
			ret = -ENOMEM;
			goto out;
		}
		index->inited |= HEM_INDEX_L0;
	}

	/* alloc the L1 BT chunk, which holds the L2 BAs */
	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
	    !table->bt_l1[index->l1]) {
		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l1_dma_addr[index->l1],
					    GFP_KERNEL);
		if (!table->bt_l1[index->l1]) {
			ret = -ENOMEM;
			goto err_alloc_hem;
		}
		index->inited |= HEM_INDEX_L1;
		*(table->bt_l0[index->l0] + mhop->l1_idx) =
					       table->bt_l1_dma_addr[index->l1];
	}

	/*
	 * Allocate a buffer chunk for QPC/MTPT/CQC/SRQC/SCCC,
	 * or a BT chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
	flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
						    size, flag);
	if (!table->hem[index->buf]) {
		ret = -ENOMEM;
		goto err_alloc_hem;
	}

	index->inited |= HEM_INDEX_BUF;
	hns_roce_hem_first(table->hem[index->buf], &iter);
	bt_ba = hns_roce_hem_addr(&iter);
	if (table->type < HEM_TYPE_MTT) {
		if (mhop->hop_num == 2)
			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
		else if (mhop->hop_num == 1)
			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	} else if (mhop->hop_num == 2) {
		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	}

	return 0;
err_alloc_hem:
	free_mhop_hem(hr_dev, table, mhop, index);
out:
	return ret;
}

static int set_mhop_hem(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj,
			struct hns_roce_hem_mhop *mhop,
			struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int step_idx;
	int ret = 0;

	if (index->inited & HEM_INDEX_L0) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 0 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_L1) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 1 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_BUF) {
		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = mhop->hop_num;
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
		if (ret)
			ibdev_err(ibdev, "set HEM step last failed!\n");
	}
out:
	return ret;
}

static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return ret;
	}

	mutex_lock(&table->mutex);
	if (table->hem[index.buf]) {
		++table->hem[index.buf]->refcount;
		goto out;
	}

	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "alloc mhop hem failed!\n");
		goto out;
	}

	/* set HEM base address to hardware */
	if (table->type < HEM_TYPE_MTT) {
		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
		if (ret) {
			ibdev_err(ibdev, "set HEM address to HW failed!\n");
			goto err_alloc;
		}
	}

	++table->hem[index.buf]->refcount;
	goto out;

err_alloc:
	free_mhop_hem(hr_dev, table, &mhop, &index);
out:
	mutex_unlock(&table->mutex);
	return ret;
}

int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	int ret = 0;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set the HEM base address (128K per page, PA) in hardware */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}

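/*
 * hns_roce_table_get() and hns_roce_table_put() refcount each HEM chunk.
 * An illustrative context-allocation path (names assumed) looks like:
 *
 *	ret = hns_roce_table_get(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 *	if (ret)
 *		return ret;
 *	... use the context entry backed by the chunk ...
 *	hns_roce_table_put(hr_dev, &hr_dev->qp_table.qp_table, qpn);
 *
 * The chunk is allocated on the first get for any object inside it and
 * is only freed once the last put drops the refcount to zero.
 */
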
static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 hop_num = mhop->hop_num;
	u32 chunk_ba_num;
	int step_idx;

	index->inited = HEM_INDEX_BUF;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	if (check_whether_bt_num_2(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem))
			index->inited |= HEM_INDEX_L0;
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem)) {
			index->inited |= HEM_INDEX_L1;
			if (hns_roce_check_bt_null(table->bt_l1, index->l1,
						   chunk_ba_num))
				index->inited |= HEM_INDEX_L0;
		}
	}

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = hop_num;

		if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
			ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);

		if (index->inited & HEM_INDEX_L1)
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
				ibdev_warn(ibdev, "failed to clear HEM step 1.\n");

		if (index->inited & HEM_INDEX_L0)
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
				ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
	}
}

static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return;
	}

	mutex_lock(&table->mutex);
	if (check_refcount && (--table->hem[index.buf]->refcount > 0)) {
		mutex_unlock(&table->mutex);
		return;
	}

	clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
	free_mhop_hem(hr_dev, table, &mhop, &index);

	mutex_unlock(&table->mutex);
}

void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = (obj & (table->num_obj - 1)) /
	    (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	void *addr = NULL;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	int length;
	int i, j;
	u32 hem_idx = 0;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */

		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
			goto out;
		/* mtt mhop */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
				      mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}

int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		struct hns_roce_hem_mhop mhop = {};
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0 = 0;
		u32 hop_num;

		if (get_hem_table_config(hr_dev, &mhop, type))
			return -EINVAL;

		buf_chunk_size = mhop.buf_chunk_size;
		bt_chunk_size = mhop.bt_chunk_size;
		num_bt_l0 = mhop.ba_l0_num;
		hop_num = mhop.hop_num;

		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
				    bt_chunk_num;
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						 sizeof(*table->bt_l1_dma_addr),
						 GFP_KERNEL);

			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						 sizeof(*table->bt_l0_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}

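/*
 * Typical pairing with the cleanup path below (a sketch; the table type
 * and sizes are assumed, values come from the device caps in practice):
 *
 *	ret = hns_roce_init_hem_table(hr_dev, &table, HEM_TYPE_QPC,
 *				      qpc_entry_sz, qp_num, 1);
 *	if (ret)
 *		return ret;
 *	...
 *	hns_roce_cleanup_hem_table(hr_dev, &table);
 */
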
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	int i;
	u64 obj;

	if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
		return;
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
						      mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}

void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hr_dev->hw->clear_hem(hr_dev, table,
			    i * table->table_chunk_size / table->obj_size, 0))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}

void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->cqc_timer_table);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
}

struct roce_hem_item {
	struct list_head list; /* link all hems in the same bt level */
	struct list_head sibling; /* link all hems of the last hop for mtt */
	void *addr;
	dma_addr_t dma_addr;
	size_t count; /* max number of BAs in this hem */
	int start; /* start buf offset in this hem */
	int end; /* end buf offset in this hem */
};

static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
						 int start, int end,
						 int count, bool exist_bt,
						 int bt_level)
{
	struct roce_hem_item *hem;

	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
	if (!hem)
		return NULL;

	if (exist_bt) {
		hem->addr = dma_alloc_coherent(hr_dev->dev,
					       count * BA_BYTE_LEN,
					       &hem->dma_addr, GFP_KERNEL);
		if (!hem->addr) {
			kfree(hem);
			return NULL;
		}
	}

	hem->count = count;
	hem->start = start;
	hem->end = end;
	INIT_LIST_HEAD(&hem->list);
	INIT_LIST_HEAD(&hem->sibling);

	return hem;
}

static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, bool exist_bt)
{
	if (exist_bt)
		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
				  hem->addr, hem->dma_addr);
	kfree(hem);
}

static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
{
	struct roce_hem_item *hem, *temp_hem;

	list_for_each_entry_safe(hem, temp_hem, head, list) {
		list_del(&hem->list);
		hem_list_free_item(hr_dev, hem, exist_bt);
	}
}

static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
			     u64 table_addr)
{
	*(u64 *)(base_addr) = table_addr;
}

/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, void *cpu_addr,
			       u64 phy_addr)
{
	hem->addr = cpu_addr;
	hem->dma_addr = (dma_addr_t)phy_addr;
}

static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem,
					     int offset)
{
	return (hem->start <= offset && offset <= hem->end);
}

static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
						  int page_offset)
{
	struct roce_hem_item *hem, *temp_hem;
	struct roce_hem_item *found = NULL;

	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
		if (hem_list_page_is_in_range(hem, page_offset)) {
			found = hem;
			break;
		}
	}

	return found;
}

static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
{
	/*
	 * hopnum   base address table levels
	 * 0        L0(buf)
	 * 1        L0 -> buf
	 * 2        L0 -> L1 -> buf
	 * 3        L0 -> L1 -> L2 -> buf
	 */
	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
}

/*
 * hem_list_calc_ba_range - calculate the number of base address entries
 * covered by one bt entry at the given level
 * @hopnum: number of multihop addressing levels
 * @bt_level: base address table level
 * @unit: ba entries per bt page
 */
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
	u32 step;
	int max;
	int i;

	if (hopnum <= bt_level)
		return 0;
	/*
	 * hopnum   bt_level   range
	 * 1        0          unit
	 * ------------
	 * 2        0          unit * unit
	 * 2        1          unit
	 * ------------
	 * 3        0          unit * unit * unit
	 * 3        1          unit * unit
	 * 3        2          unit
	 */
	step = 1;
	max = hopnum - bt_level;
	for (i = 0; i < max; i++)
		step = step * unit;

	return step;
}

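/*
 * Worked example of the table above, with unit = 512 (an assumed value,
 * i.e. a 4 KB bt page holding 512 eight-byte BAs):
 *
 *	hem_list_calc_ba_range(3, 0, 512) = 512 * 512 * 512
 *	hem_list_calc_ba_range(3, 1, 512) = 512 * 512 = 262144
 *	hem_list_calc_ba_range(3, 2, 512) = 512
 *	hem_list_calc_ba_range(1, 1, 512) = 0 (bt_level >= hopnum)
 */
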
/*
 * hns_roce_hem_list_calc_root_ba - calculate the number of root ba entries
 * needed to cover all regions
 * @regions: buf region array
 * @region_cnt: array size of @regions
 * @unit: ba entries per bt page
 */
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit)
{
	struct hns_roce_buf_region *r;
	int total = 0;
	int step;
	int i;

	for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
		if (r->hopnum > 1) {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step > 0)
				total += (r->count + step - 1) / step;
		} else {
			total += r->count;
		}
	}

	return total;
}

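/*
 * Example (all numbers assumed): two regions with unit = 512, one hopnum-2
 * region of count = 1000 and one hopnum-1 region of count = 100. The first
 * needs (1000 + 511) / 512 = 2 root entries, each pointing to an L1 bt
 * page; the second maps its 100 pages directly from the root, so the root
 * bt must hold 2 + 100 = 102 BAs in total.
 */
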
static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
				 const struct hns_roce_buf_region *r, int unit,
				 int offset, struct list_head *mid_bt,
				 struct list_head *btm_bt)
{
	struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
	struct roce_hem_item *cur, *pre;
	const int hopnum = r->hopnum;
	int start_aligned;
	int distance;
	int ret = 0;
	int max_ofs;
	int level;
	u32 step;
	int end;

	if (hopnum <= 1)
		return 0;

	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
		return -EINVAL;
	}

	if (offset < r->offset) {
		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
			offset, r->offset);
		return -EINVAL;
	}

	distance = offset - r->offset;
	max_ofs = r->offset + r->count - 1;
	for (level = 0; level < hopnum; level++)
		INIT_LIST_HEAD(&temp_list[level]);

	/* config L1 bt to last bt and link them to corresponding parent */
	for (level = 1; level < hopnum; level++) {
		cur = hem_list_search_item(&mid_bt[level], offset);
		if (cur) {
			hem_ptrs[level] = cur;
			continue;
		}

		step = hem_list_calc_ba_range(hopnum, level, unit);
		if (step < 1) {
			ret = -EINVAL;
			goto err_exit;
		}

		start_aligned = (distance / step) * step + r->offset;
		end = min_t(int, start_aligned + step - 1, max_ofs);
		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
					  true, level);
		if (!cur) {
			ret = -ENOMEM;
			goto err_exit;
		}
		hem_ptrs[level] = cur;
		list_add(&cur->list, &temp_list[level]);
		if (hem_list_is_bottom_bt(hopnum, level))
			list_add(&cur->sibling, &temp_list[0]);

		/* link bt to parent bt */
		if (level > 1) {
			pre = hem_ptrs[level - 1];
			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
			hem_list_link_bt(hr_dev, pre->addr + step,
					 cur->dma_addr);
		}
	}

	list_splice(&temp_list[0], btm_bt);
	for (level = 1; level < hopnum; level++)
		list_splice(&temp_list[level], &mid_bt[level]);

	return 0;

err_exit:
	for (level = 1; level < hopnum; level++)
		hem_list_free_all(hr_dev, &temp_list[level], true);

	return ret;
}

static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list, int unit,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt)
{
	struct roce_hem_item *hem, *temp_hem, *root_hem;
	struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
	const struct hns_roce_buf_region *r;
	struct list_head temp_root;
	struct list_head temp_btm;
	void *cpu_base;
	u64 phy_base;
	int ret = 0;
	int ba_num;
	int offset;
	int total;
	int step;
	int i;

	r = &regions[0];
	root_hem = hem_list_search_item(&hem_list->root_bt, r->offset);
	if (root_hem)
		return 0;

	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
	if (ba_num < 1)
		return -ENOMEM;

	INIT_LIST_HEAD(&temp_root);
	offset = r->offset;
	/* point to the last region */
	r = &regions[region_cnt - 1];
	root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
				       ba_num, true, 0);
	if (!root_hem)
		return -ENOMEM;
	list_add(&root_hem->list, &temp_root);

	hem_list->root_ba = root_hem->dma_addr;

	INIT_LIST_HEAD(&temp_btm);
	for (i = 0; i < region_cnt; i++)
		INIT_LIST_HEAD(&temp_list[i]);

	total = 0;
	for (i = 0; i < region_cnt && total < ba_num; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		/* all regions' mid[x][0] share the root_bt's trunk */
		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;

		/* if hopnum is 0 or 1, carve a new fake hem from the root bt
		 * whose address is shared by all regions.
		 */
		if (hem_list_is_bottom_bt(r->hopnum, 0)) {
			hem = hem_list_alloc_item(hr_dev, r->offset,
						  r->offset + r->count - 1,
						  r->count, false, 0);
			if (!hem) {
				ret = -ENOMEM;
				goto err_exit;
			}
			hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
			list_add(&hem->list, &temp_list[i]);
			list_add(&hem->sibling, &temp_btm);
			total += r->count;
		} else {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step < 1) {
				ret = -EINVAL;
				goto err_exit;
			}
			/* if a mid bt exists, link L1 to L0 */
			list_for_each_entry_safe(hem, temp_hem,
					&hem_list->mid_bt[i][1], list) {
				offset = (hem->start - r->offset) / step *
					  BA_BYTE_LEN;
				hem_list_link_bt(hr_dev, cpu_base + offset,
						 hem->dma_addr);
				total++;
			}
		}
	}

	list_splice(&temp_btm, &hem_list->btm_bt);
	list_splice(&temp_root, &hem_list->root_bt);
	for (i = 0; i < region_cnt; i++)
		list_splice(&temp_list[i], &hem_list->mid_bt[i][0]);

	return 0;

err_exit:
	for (i = 0; i < region_cnt; i++)
		hem_list_free_all(hr_dev, &temp_list[i], false);

	hem_list_free_all(hr_dev, &temp_root, true);

	return ret;
}

/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt, unsigned int bt_pg_shift)
{
	const struct hns_roce_buf_region *r;
	int ofs, end;
	int ret;
	int unit;
	int i;

	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
			region_cnt);
		return -EINVAL;
	}

	unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		end = r->offset + r->count;
		for (ofs = r->offset; ofs < end; ofs += unit) {
			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
						    hem_list->mid_bt[i],
						    &hem_list->btm_bt);
			if (ret) {
				dev_err(hr_dev->dev,
					"alloc hem trunk failed, ret = %d!\n",
					ret);
				goto err_alloc;
			}
		}
	}

	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
				     region_cnt);
	if (ret)
		dev_err(hr_dev->dev, "alloc hem root failed, ret = %d!\n", ret);
	else
		return 0;

err_alloc:
	hns_roce_hem_list_release(hr_dev, hem_list);

	return ret;
}

void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list)
{
	int i, j;

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
					  j != 0);

	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	hem_list->root_ba = 0;
}

void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
	int i, j;

	INIT_LIST_HEAD(&hem_list->root_bt);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
}

void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr)
{
	struct list_head *head = &hem_list->btm_bt;
	struct roce_hem_item *hem, *temp_hem;
	void *cpu_base = NULL;
	u64 phy_base = 0;
	int nr = 0;

	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
		if (hem_list_page_is_in_range(hem, offset)) {
			nr = offset - hem->start;
			cpu_base = hem->addr + nr * BA_BYTE_LEN;
			phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
			nr = hem->end + 1 - offset;
			break;
		}
	}

	if (mtt_cnt)
		*mtt_cnt = nr;

	if (phy_addr)
		*phy_addr = phy_base;

	return cpu_base;
}

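/*
 * Illustrative use of the lookup above (offset value assumed): to read
 * the BAs backing page offset 300 of an MTT region,
 *
 *	int cnt;
 *	u64 pa;
 *	void *bas = hns_roce_hem_list_find_mtt(hr_dev, hem_list,
 *					       300, &cnt, &pa);
 *
 * returns a CPU pointer into the bottom-level bt page containing offset
 * 300, with cnt entries valid from there to the end of that page and pa
 * holding the matching DMA address.
 */
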