// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Arm Ltd.
 */
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/types.h>
#include <linux/gfp_types.h>
#include <linux/io-pgtable-arm.h>

#include <nvhe/alloc.h>
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>

#include "arm_smmu_v3.h"
#include "arm-smmu-v3-module.h"

#define io_pgtable_cfg_to_pgtable(x) container_of((x), struct io_pgtable, cfg)

#define io_pgtable_cfg_to_data(x)					\
	io_pgtable_to_data(io_pgtable_cfg_to_pgtable(x))

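/* Mapping over a PTE that is already valid is reported with -EEXIST. */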
int arm_lpae_map_exists(void)
{
	return -EEXIST;
}

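/* Unmapping a PTE that is not present is treated as a no-op. */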
void arm_lpae_unmap_empty(void)
{
}

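/*
 * Allocate page-table memory from the hyp IOMMU page pools; @size must be
 * page-aligned. Identity-mapped domains use the atomic donation path, and
 * the new pages are cleaned to the PoC when the walker is not coherent.
 */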
void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
			     struct io_pgtable_cfg *cfg, void *cookie)
{
	void *addr;
	struct arm_lpae_io_pgtable *data = io_pgtable_cfg_to_data(cfg);

	if (!PAGE_ALIGNED(size))
		return NULL;

	if (data->idmapped)
		addr = kvm_iommu_donate_pages_atomic(get_order(size));
	else
		addr = kvm_iommu_donate_pages(get_order(size), 0);

	if (addr && !cfg->coherent_walk)
		kvm_flush_dcache_to_poc(addr, size);

	return addr;
}

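/*
 * Return page-table memory to the hyp IOMMU page pools, cleaning it to the
 * PoC first when the walker is not coherent.
 */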
void __arm_lpae_free_pages(void *addr, size_t size, struct io_pgtable_cfg *cfg,
			   void *cookie)
{
	u8 order;
	struct arm_lpae_io_pgtable *data = io_pgtable_cfg_to_data(cfg);

	/*
	 * All allocations are guaranteed to be page-aligned, but core code
	 * might free the PGD with its actual, unaligned size.
	 */
	order = get_order(PAGE_ALIGN(size));

	if (!cfg->coherent_walk)
		kvm_flush_dcache_to_poc(addr, size);

	if (data->idmapped)
		kvm_iommu_reclaim_pages_atomic(addr, order);
	else
		kvm_iommu_reclaim_pages(addr, order);
}

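/* Clean updated PTEs to the PoC so a non-coherent walker observes them. */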
void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
			 struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		kvm_flush_dcache_to_poc(ptep, sizeof(*ptep) * num_entries);
}

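/*
 * Derive the stage-1 or stage-2 LPAE page-table parameters from @cfg and
 * store them in @data.
 */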
static int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
				   struct arm_lpae_io_pgtable *data)
{
	int ret = -EINVAL;

	if (cfg->fmt == ARM_64_LPAE_S2)
		ret = arm_lpae_init_pgtable_s2(cfg, data);
	else if (cfg->fmt == ARM_64_LPAE_S1)
		ret = arm_lpae_init_pgtable_s1(cfg, data);

	if (ret)
		return ret;

	data->iop.cfg = *cfg;
	data->iop.fmt = cfg->fmt;

	return 0;
}

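/*
 * Allocate and initialise an io_pgtable at EL2: set up the LPAE data,
 * allocate a suitably aligned PGD and expose its physical address through
 * the stage-1 TTBR or stage-2 VTTBR of the embedded configuration. On
 * failure, NULL is returned and the error is reported via @out_ret.
 */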
struct io_pgtable *kvm_arm_io_pgtable_alloc(struct io_pgtable_cfg *cfg,
					    void *cookie,
					    int *out_ret)
{
	size_t pgd_size, alignment;
	struct arm_lpae_io_pgtable *data;
	int ret;

	data = hyp_alloc(sizeof(*data));
	if (!data) {
		*out_ret = hyp_alloc_errno();
		return NULL;
	}

	ret = kvm_arm_io_pgtable_init(cfg, data);
	if (ret)
		goto out_free;

	pgd_size = PAGE_ALIGN(ARM_LPAE_PGD_SIZE(data));
	data->pgd = __arm_lpae_alloc_pages(pgd_size, 0, &data->iop.cfg, cookie);
	if (!data->pgd) {
		ret = -ENOMEM;
		goto out_free;
	}
	/*
	 * A PGD with eight or more entries must be naturally aligned to its
	 * size; smaller PGDs only need 64-byte alignment.
	 */
	alignment = max(pgd_size, 8 * sizeof(arm_lpae_iopte));
	if (!IS_ALIGNED(hyp_virt_to_phys(data->pgd), alignment)) {
		__arm_lpae_free_pages(data->pgd, pgd_size,
				      &data->iop.cfg, cookie);
		ret = -EINVAL;
		goto out_free;
	}

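	/*
	 * Record the cookie and publish the PGD physical address via the
	 * format-specific TTBR/VTTBR field.
	 */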
	data->iop.cookie = cookie;
	if (cfg->fmt == ARM_64_LPAE_S2)
		data->iop.cfg.arm_lpae_s2_cfg.vttbr = __arm_lpae_virt_to_phys(data->pgd);
	else if (cfg->fmt == ARM_64_LPAE_S1)
		data->iop.cfg.arm_lpae_s1_cfg.ttbr = __arm_lpae_virt_to_phys(data->pgd);

	if (!data->iop.cfg.coherent_walk)
		kvm_flush_dcache_to_poc(data->pgd, pgd_size);

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	*out_ret = 0;
	return &data->iop;
out_free:
	hyp_free(data);
	*out_ret = ret;
	return NULL;
}

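/*
 * Tear down an io_pgtable returned by kvm_arm_io_pgtable_alloc(): flush the
 * TLBs, free the table pages and release the descriptor.
 */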
int kvm_arm_io_pgtable_free(struct io_pgtable *iopt)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iopt);
	size_t pgd_size = ARM_LPAE_PGD_SIZE(data);

	if (!data->iop.cfg.coherent_walk)
		kvm_flush_dcache_to_poc(data->pgd, pgd_size);

	io_pgtable_tlb_flush_all(iopt);
	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	hyp_free(data);
	return 0;
}