// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"

/**
 * irdma_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by irdma_hmc_rsrc_type.
 */
static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *sd_idx,
				      u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
		   hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}
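
/*
 * Worked example (a sketch, assuming the 2M direct backing-page size,
 * i.e. IRDMA_HMC_DIRECT_BP_SIZE == 0x200000, consistent with the "2M
 * backing page" comment below): an object range with fpm_addr = 0x600000
 * and fpm_limit = 0xa00000 yields *sd_idx = 3 and *sd_limit = 5, i.e.
 * the objects span segment descriptors 3 and 4.
 */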

/**
 * irdma_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by irdma_hmc_rsrc_type.
 */
static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *pd_idx,
				      u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
		   hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}
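
/*
 * Worked example (a sketch, assuming the 4K paged backing-page size,
 * i.e. IRDMA_HMC_PAGED_BP_SIZE == 4096, consistent with the "4K pd page"
 * comment below): fpm_addr = 0x600000 and fpm_limit = 0x604000 yield
 * *pd_idx = 1536 and *pd_limit = 1540, i.e. the objects span page
 * descriptors 1536 through 1539.
 */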

/**
 * irdma_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	entry->data = pa |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
				 type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}
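
/*
 * The SD command word combines the SD index with the PMSDWR (write) field;
 * BIT(15) appears to be an SD partition select (a PMSDPARTSEL-style bit, an
 * assumption based on the matching PDINV programming below). Clearing an SD
 * (irdma_clr_sd_entry) writes the same command but leaves PMSDVALID unset
 * in the data word, which marks the entry invalid in hardware.
 */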

/**
 * irdma_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
				 type == IRDMA_SD_TYPE_PAGED ? 0 : 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}

/**
 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
 * @dev: pointer to our device struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 */
static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
					      u32 pd_idx)
{
	u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);

	writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}

/**
 * irdma_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
					u64 pa, u32 sd_idx,
					enum irdma_sd_entry_type type,
					bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
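
/*
 * Typical use (a sketch; the caller-side names here are hypothetical):
 * after allocating a direct backing page, program its SD through the CQP
 * with
 *
 *	ret = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, dma_mem.pa,
 *			       sd_idx, IRDMA_SD_TYPE_DIRECT, true);
 *
 * and pass setsd = false to clear the same entry again.
 */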

/**
 * irdma_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
					       struct irdma_hmc_info *hmc_info,
					       u32 sd_index, u32 sd_cnt,
					       bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {};
	u64 pa;
	u32 i;
	enum irdma_status_code ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry || (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
				     sd_entry->u.pd_table.pd_page_addr.pa :
				     sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
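		/* flush a full batch of SD updates to the CQP before
		 * accumulating any more entries
		 */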
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: sd_programming failed err=%d\n",
					  ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static enum irdma_status_code
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;

	if (!info->add_sd_cnt)
		return 0;
	return irdma_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0], info->add_sd_cnt,
				true);
}

/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
enum irdma_status_code
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum irdma_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
			  info->rsrc_type, info->start_idx, info->count,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return IRDMA_ERR_INVALID_SD_INDEX;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
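		/* for paged SDs owned by this function's own HMC info
		 * (PBLE excluded), back every PD in the range with a page
		 */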
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
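	/* unwind in reverse: release the PDs and SDs that were set up
	 * before the failing SD entry
	 */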
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = IRDMA_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static enum irdma_status_code
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
			struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	enum irdma_status_code ret_code = 0;
	u32 i, sd_idx;
	struct irdma_dma_mem *mem;

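	/* on a reset the hardware state is gone; skip the CQP SD clear and
	 * only release the host memory
	 */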
	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			      &sd_entry->u.pd_table.pd_page_addr :
			      &sd_entry->u.bp.addr;

		if (!mem || !mem->va) {
			ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
		} else {
			dma_free_coherent(dev->hw->device, mem->size, mem->va,
					  mem->pa);
			mem->va = NULL;
		}
	}

	return ret_code;
}

/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs and free the memory for the PDs
 * and backing storage. After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping information
 * about the PDs and backing storage.
 */
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
					    struct irdma_hmc_del_obj_info *info,
					    bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum irdma_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->count, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

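	/* pass 1: free the backing pages behind any valid paged PD entries
	 * in the range
	 */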
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
		return IRDMA_ERR_INVALID_SD_INDEX;
	}

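	/* pass 2: prepare the SD entries themselves for removal and queue
	 * their indexes for the CQP clear in irdma_finish_del_sd_reg()
	 */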
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
					(u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}

/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
						struct irdma_hmc_info *hmc_info,
						u32 sd_index,
						enum irdma_sd_entry_type type,
						u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
						&dma_mem.pa, GFP_KERNEL);
		if (!dma_mem.va)
			return IRDMA_ERR_NO_MEMORY;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
				&sd_entry->u.pd_table.pd_entry_virt_mem;

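			/* one tracking entry per PD in the SD: a 2M SD
			 * holds 512 4K pages
			 */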
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				dma_free_coherent(hw->device, dma_mem.size,
						  dma_mem.va, dma_mem.pa);
				dma_mem.va = NULL;
				return IRDMA_ERR_NO_MEMORY;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
			       sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			memcpy(&sd_entry->u.bp.addr, &dma_mem,
			       sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}

/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use the preallocated page instead of allocating a new one.
 *
 * This function:
 * 1. Initializes the pd entry
 * 2. Adds pd_entry in the pd_table
 * 3. Marks the entry valid in the irdma_hmc_pd_entry structure
 * 4. Initializes the pd_entry's ref count to 1
 * assumptions:
 * 1. The memory for the pd should be pinned down, physically contiguous,
 *    aligned on a 4K boundary, and zeroed.
 * 2. It should be 4K in size.
 */
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
						struct irdma_hmc_info *hmc_info,
						u32 pd_index,
						struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
					   IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			page->va = dma_alloc_coherent(dev->hw->device,
						      page->size, &page->pa,
						      GFP_KERNEL);
			if (!page->va)
				return IRDMA_ERR_NO_MEMORY;

			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
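		/* write the page's physical address into the PD page with
		 * the low bit set as the valid bit, then flush the HW cache
		 */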
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}

/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 * 1. Marks the entry in the pd table (for paged address mode) or in the
 *    sd table (for direct address mode) invalid.
 * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
 * 3. Decrements the ref count for the pd_entry
 * assumptions:
 * 1. Caller can deallocate the memory used by backing storage after this
 *    function returns.
 */
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
					  struct irdma_hmc_info *hmc_info,
					  u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
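	/* e.g. with 512 PDs per SD (an assumption consistent with a 2M SD
	 * of 4K pages), idx 1540 splits into sd_idx 3 and rel_pd_idx 4
	 */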
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return IRDMA_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return IRDMA_ERR_PARAM;

		dma_free_coherent(dev->hw->device, mem->size, mem->va,
				  mem->pa);
		mem->va = NULL;
	}
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}

/**
 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
					       u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return IRDMA_ERR_NOT_READY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}

/**
 * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

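	/* unlike the direct case, the PD-table use count is dropped by
	 * irdma_remove_pd_bp(); a nonzero count here means PDs in this SD
	 * still have backing pages
	 */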
	if (sd_entry->u.pd_table.use_cnt)
		return IRDMA_ERR_NOT_READY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}