// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>

#include "../habanalabs.h"

bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	int rc = -EOPNOTSUPP;

	if (!hdev->mmu_enable)
		return 0;

	if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].init != NULL)
		rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);

	return rc;
}
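
/*
 * Usage sketch (illustrative only; the exact call sites live elsewhere in
 * the driver): the device-init path is expected to install the per-ASIC
 * page-table callbacks before bringing the MMU up, roughly:
 *
 *	rc = hl_mmu_if_set_funcs(hdev);		// populate hdev->mmu_func[]
 *	if (rc)
 *		return rc;
 *	rc = hl_mmu_init(hdev);			// init DR and/or HR page tables
 *	...
 *	hl_mmu_fini(hdev);			// mirrored on teardown
 */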

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].fini(hdev);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash table
 * to hold all page-table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int rc = -EOPNOTSUPP;

	if (!hdev->mmu_enable)
		return 0;

	mutex_init(&ctx->mmu_lock);

	if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL)
		rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);

	return rc;
}

/*
 * hl_mmu_ctx_fini - disable a ctx from using the MMU module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);

	mutex_destroy(&ctx->mmu_lock);
}
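
/*
 * Usage sketch (illustrative; the real call sites are in the context
 * lifecycle code): every context that maps memory pairs the two calls,
 * roughly:
 *
 *	rc = hl_mmu_ctx_init(ctx);
 *	if (rc)
 *		goto err;
 *	...map/unmap pages on behalf of the context...
 *	hl_mmu_ctx_fini(ctx);		// after all mappings were removed
 */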

/*
 * hl_mmu_unmap_page - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented at a higher level in order to protect the entire unmapping of
 * the memory area.
 *
 * For optimization reasons a PCI flush may be requested once after unmapping
 * a large area.
 */
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0, pgt_residency;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and unmap them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		/*
		 * MMU page size may differ from DRAM page size.
		 * In such case work with the DRAM page size and let the MMU
		 * scrambling routine handle this mismatch when calculating
		 * the address to remove from the MMU page table.
		 */
		if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
			real_page_size = prop->dram_page_size;
		} else {
			dev_err(hdev->dev,
				"page size of %u is not %uKB aligned, can't unmap\n",
				page_size, mmu_prop->page_size >> 10);

			return -EFAULT;
		}
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func[pgt_residency].unmap(ctx,
						real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		hdev->mmu_func[pgt_residency].flush(ctx);

	return rc;
}
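
/*
 * Example of the "flush once" optimization mentioned above (illustrative
 * sketch; hl_mmu_unmap_contiguous() below implements this exact pattern):
 *
 *	for (off = 0 ; off < size ; off += page_size) {
 *		bool last = (off + page_size) >= size;
 *
 *		hl_mmu_unmap_page(ctx, virt_addr + off, page_size, last);
 *	}
 */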

/*
 * hl_mmu_map_page - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * at a higher level in order to protect the entire mapping of the memory area.
 *
 * For optimization reasons a PCI flush may be requested once after mapping a
 * large area.
 */
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
		u32 page_size, bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, pgt_residency, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
			(prop->dram_page_size < mmu_prop->page_size)) {
		/*
		 * MMU page size may differ from DRAM page size.
		 * In such case work with the DRAM page size and let the MMU
		 * scrambling routine handle this mismatch when calculating
		 * the address to place in the MMU page table (in that case
		 * also make sure that the dram_page_size is smaller than the
		 * MMU page size).
		 */
		real_page_size = prop->dram_page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't map\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	/*
	 * Verify that the phys and virt addresses are aligned with the
	 * MMU page size (for DRAM this means checking the addresses after
	 * scrambling).
	 */
	if ((is_dram_addr &&
			((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
				(mmu_prop->page_size - 1)) ||
			(hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
				(mmu_prop->page_size - 1)))) ||
		(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
				(virt_addr & (real_page_size - 1)))))
		dev_crit(hdev->dev,
			"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
			phys_addr, virt_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func[pgt_residency].map(ctx,
						real_virt_addr, real_phys_addr,
						real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		hdev->mmu_func[pgt_residency].flush(ctx);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (hdev->mmu_func[pgt_residency].unmap(ctx,
						real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	hdev->mmu_func[pgt_residency].flush(ctx);

	return rc;
}
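
/*
 * The alignment test above is plain mask arithmetic: for a 2MB MMU page,
 * mmu_prop->page_size - 1 == 0x1FFFFF, so an address is page-aligned iff
 * (addr & 0x1FFFFF) == 0. A quick illustrative check (addresses made up):
 *
 *	0x1000200000 & 0x1FFFFF == 0		-> aligned, OK to map
 *	0x1000201000 & 0x1FFFFF == 0x1000	-> misaligned, dev_crit() above
 */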

/*
 * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
 *                         for mapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @size: size to map
 *
 */
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
					u64 phys_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va, curr_pa;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		curr_pa = phys_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
								flush_pte);
		if (rc) {
			dev_err(hdev->dev,
				"Map failed for va 0x%llx to pa 0x%llx\n",
				curr_va, curr_pa);
			goto unmap;
		}
	}

	return rc;

unmap:
	for (; off >= 0 ; off -= page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off - (s32) page_size) < 0;
		if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va 0x%llx\n", curr_va);
	}

	return rc;
}
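
/*
 * Usage sketch (hypothetical values, for illustration only): mapping a
 * contiguous 1MB region while the caller holds whatever higher-level lock
 * protects the area, relying on this wrapper to pick the page size and
 * flush once at the end:
 *
 *	rc = hl_mmu_map_contiguous(ctx, va_base, pa_base, SZ_1M);
 *	...
 *	rc = hl_mmu_unmap_contiguous(ctx, va_base, SZ_1M);
 */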

/*
 * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
 *                           for unmapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @size: size to unmap
 *
 */
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte);
		if (rc)
			dev_warn_ratelimited(hdev->dev,
				"Unmap failed for va 0x%llx\n", curr_va);
	}

	return rc;
}

/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL)
		hdev->mmu_func[MMU_DR_PGT].swap_out(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL)
		hdev->mmu_func[MMU_HR_PGT].swap_out(ctx);
}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL)
		hdev->mmu_func[MMU_DR_PGT].swap_in(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL)
		hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
}

static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
						struct hl_mmu_hop_info *hops,
						u64 *phys_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
	u32 hop0_shift_off;
	void *p;

	/* last hop holds the phys address and flags */
	if (hops->unscrambled_paddr)
		tmp_phys_addr = hops->unscrambled_paddr;
	else
		tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;

	if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
		p = &prop->pmmu_huge;
	else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
		p = &prop->pmmu;
	else /* HL_VA_RANGE_TYPE_DRAM */
		p = &prop->dmmu;

	if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
			!is_power_of_2(prop->dram_page_size)) {
		unsigned long dram_page_size = prop->dram_page_size;
		u64 page_offset_mask;
		u64 phys_addr_mask;
		u32 bit;

		/*
		 * Find the last set bit in the page size to cover all bits of
		 * the page offset. Note that 1 has to be added to the bit
		 * index, and that the local unsigned long variable is used to
		 * avoid an alignment issue.
		 */
		bit = find_last_bit(&dram_page_size,
					sizeof(dram_page_size) * BITS_PER_BYTE) + 1;
		page_offset_mask = (BIT_ULL(bit) - 1);
		phys_addr_mask = ~page_offset_mask;
		*phys_addr = (tmp_phys_addr & phys_addr_mask) |
				(virt_addr & page_offset_mask);
	} else {
		/*
		 * Find the correct hop shift field in the hl_mmu_properties
		 * structure in order to determine the right masks
		 * for the page offset.
		 */
		hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift);
		p = (char *)p + hop0_shift_off;
		p = (char *)p + ((hops->used_hops - 1) * sizeof(u64));
		hop_shift = *(u64 *)p;
		offset_mask = (1ull << hop_shift) - 1;
		addr_mask = ~(offset_mask);
		*phys_addr = (tmp_phys_addr & addr_mask) |
				(virt_addr & offset_mask);
	}
}

int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
	struct hl_mmu_hop_info hops;
	int rc;

	memset(&hops, 0, sizeof(hops));

	rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
	if (rc)
		return rc;

	hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);

	return 0;
}
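
/*
 * Usage sketch (illustrative; a debug/inspection path is the typical
 * caller): translating a device virtual address back to its physical
 * address while the context is still alive:
 *
 *	u64 pa;
 *
 *	if (!hl_mmu_va_to_pa(ctx, va, &pa))
 *		dev_dbg(hdev->dev, "va 0x%llx -> pa 0x%llx\n", va, pa);
 */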

int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
			struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	int rc;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return -EOPNOTSUPP;

	hops->scrambled_vaddr = virt_addr;	/* assume no scrambling */

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);

	/* host-residency is the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	mutex_lock(&ctx->mmu_lock);

	if (mmu_prop->host_resident)
		rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx,
							virt_addr, hops);
	else
		rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx,
							virt_addr, hops);

	mutex_unlock(&ctx->mmu_lock);

	/* add page offset to physical address */
	if (hops->unscrambled_paddr)
		hl_mmu_pa_page_with_offset(ctx, virt_addr, hops,
					&hops->unscrambled_paddr);

	return rc;
}

int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return 0;

	switch (hdev->asic_type) {
	case ASIC_GOYA:
	case ASIC_GAUDI:
	case ASIC_GAUDI_SEC:
		hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EOPNOTSUPP;
	}

	return 0;
}
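
/*
 * Adding a new ASIC means extending the switch above. A hypothetical ASIC
 * with host-resident page tables would install its callbacks into the
 * MMU_HR_PGT slot instead (the names below are illustrative only):
 *
 *	case ASIC_FOO:
 *		hl_mmu_foo_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
 *		break;
 */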

/**
 * hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to scramble.
 *
 * Return: The scrambled address.
 */
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}

/**
 * hl_mmu_descramble_addr() - The generic mmu address descrambling
 * routine.
 * @hdev: pointer to device data.
 * @addr: The address to descramble.
 *
 * Return: The un-scrambled address.
 */
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}