/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_

#define I915_GTT_PAGE_SHIFT         12

struct intel_vgpu_mm;

#define INTEL_GVT_INVALID_ADDR (~0UL)

struct intel_gvt_gtt_entry {
	u64 val64;
	int type;
};

struct intel_gvt_gtt_pte_ops {
	int (*get_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	int (*set_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	bool (*test_present)(struct intel_gvt_gtt_entry *e);
	void (*clear_present)(struct intel_gvt_gtt_entry *e);
	void (*set_present)(struct intel_gvt_gtt_entry *e);
	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
	void (*clear_pse)(struct intel_gvt_gtt_entry *e);
	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
	bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
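
/*
 * Illustrative sketch of how a caller could read a guest PTE through the
 * ops table above and test its present bit. The helper name and the error
 * value are assumptions made purely for illustration, not part of the API.
 */
#if 0
static int example_read_present_entry(struct intel_gvt_gtt_pte_ops *ops,
				      void *pt, unsigned long index,
				      struct intel_gvt_gtt_entry *e,
				      struct intel_vgpu *vgpu)
{
	int ret;

	/* Read entry 'index' from the page table backed by 'pt'. */
	ret = ops->get_entry(pt, e, index, false, 0, vgpu);
	if (ret)
		return ret;

	/* Treat a non-present entry as a lookup failure. */
	if (!ops->test_present(e))
		return -EINVAL;

	return 0;
}
#endif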

struct intel_gvt_gtt_gma_ops {
	unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pde_index)(unsigned long gma);
	unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
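
/*
 * Illustrative sketch of splitting one graphics memory address into the
 * per-level indexes of a 4-level PPGTT walk using the helpers above. The
 * helper name and result struct are assumptions for this example only.
 */
#if 0
struct example_gma_indexes {
	unsigned long pml4, pdp, pde, pte;
};

static void example_split_gma(struct intel_gvt_gtt_gma_ops *ops,
			      unsigned long gma,
			      struct example_gma_indexes *idx)
{
	/* Each helper extracts one level's index from the same GMA. */
	idx->pml4 = ops->gma_to_pml4_index(gma);
	idx->pdp = ops->gma_to_l4_pdp_index(gma);
	idx->pde = ops->gma_to_pde_index(gma);
	idx->pte = ops->gma_to_pte_index(gma);
}
#endif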

struct intel_gvt_gtt {
	struct intel_gvt_gtt_pte_ops *pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops;
	int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
	struct list_head oos_page_use_list_head;
	struct list_head oos_page_free_list_head;
	struct mutex ppgtt_mm_lock;
	struct list_head ppgtt_mm_lru_list_head;

	struct page *scratch_page;
	unsigned long scratch_mfn;
};

enum intel_gvt_gtt_type {
	GTT_TYPE_INVALID = 0,

	GTT_TYPE_GGTT_PTE,

	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
	GTT_TYPE_PPGTT_PTE_64K_ENTRY,
	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
	GTT_TYPE_PPGTT_PTE_1G_ENTRY,

	GTT_TYPE_PPGTT_PTE_ENTRY,

	GTT_TYPE_PPGTT_PDE_ENTRY,
	GTT_TYPE_PPGTT_PDP_ENTRY,
	GTT_TYPE_PPGTT_PML4_ENTRY,

	GTT_TYPE_PPGTT_ROOT_ENTRY,

	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,

	GTT_TYPE_PPGTT_ENTRY,

	GTT_TYPE_PPGTT_PTE_PT,
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,

	GTT_TYPE_MAX,
};

enum intel_gvt_mm_type {
	INTEL_GVT_MM_GGTT,
	INTEL_GVT_MM_PPGTT,
};

#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES

struct intel_gvt_partial_pte {
	unsigned long offset;
	u64 data;
	struct list_head list;
};

struct intel_vgpu_mm {
	enum intel_gvt_mm_type type;
	struct intel_vgpu *vgpu;

	struct kref ref;
	atomic_t pincount;

	union {
		struct {
			enum intel_gvt_gtt_type root_entry_type;
			/*
			 * The 4 PDPs in ring context. For 48-bit addressing,
			 * only PDP0 is valid and points to PML4. For 32-bit
			 * addressing, all 4 are used as true PDPs.
			 */
			u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
			u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
			bool shadowed;

			struct list_head list;
			struct list_head lru_list;
			struct list_head link; /* possible LRI shadow mm list */
		} ppgtt_mm;
		struct {
			void *virtual_ggtt;
			/* Save/restore for PM */
			u64 *host_ggtt_aperture;
			u64 *host_ggtt_hidden;
			struct list_head partial_pte_list;
		} ggtt_mm;
	};
};
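
/*
 * Illustrative sketch: the 'type' field selects which union member is
 * valid, so callers are expected to check it before touching ppgtt_mm or
 * ggtt_mm. The helper name is an assumption for this example only.
 */
#if 0
static bool example_mm_is_shadowed(struct intel_vgpu_mm *mm)
{
	/* Only PPGTT mms carry shadow state; GGTT mms never do. */
	if (mm->type != INTEL_GVT_MM_PPGTT)
		return false;

	return mm->ppgtt_mm.shadowed;
}
#endif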

struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);

static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
	kref_get(&mm->ref);
}

void _intel_vgpu_mm_release(struct kref *mm_ref);

static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
{
	kref_put(&mm->ref, _intel_vgpu_mm_release);
}

static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
{
	intel_vgpu_mm_put(mm);
}
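
/*
 * Illustrative sketch of the usual reference pattern around the helpers
 * above: take a reference while an mm is handed to another user, drop it
 * when that user is done. The function name is an assumption for this
 * example only.
 */
#if 0
static void example_borrow_mm(struct intel_vgpu_mm *mm)
{
	intel_vgpu_mm_get(mm);	/* keep the mm alive while borrowed */
	/* ... use mm ... */
	intel_vgpu_mm_put(mm);	/* may free the mm via _intel_vgpu_mm_release() */
}
#endif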

struct intel_vgpu_guest_page;

struct intel_vgpu_scratch_pt {
	struct page *page;
	unsigned long page_mfn;
};

struct intel_vgpu_gtt {
	struct intel_vgpu_mm *ggtt_mm;
	unsigned long active_ppgtt_mm_bitmap;
	struct list_head ppgtt_mm_list_head;
	struct radix_tree_root spt_tree;
	struct list_head oos_page_list_head;
	struct list_head post_shadow_list_head;
	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};

int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);

int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);

struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
					      int page_table_level,
					      void *root_entry);

struct intel_vgpu_oos_page {
	struct intel_vgpu_ppgtt_spt *spt;
	struct list_head list;
	struct list_head vm_list;
	int id;
	void *mem;
};

#define GTT_ENTRY_NUM_IN_ONE_PAGE 512

/* Represents a vGPU shadow page table. */
struct intel_vgpu_ppgtt_spt {
	atomic_t refcount;
	struct intel_vgpu *vgpu;

	struct {
		enum intel_gvt_gtt_type type;
		bool pde_ips; /* for 64KB PTEs */
		void *vaddr;
		struct page *page;
		unsigned long mfn;
	} shadow_page;

	struct {
		enum intel_gvt_gtt_type type;
		bool pde_ips; /* for 64KB PTEs */
		unsigned long gfn;
		unsigned long write_cnt;
		struct intel_vgpu_oos_page *oos_page;
	} guest_page;

	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
	struct list_head post_shadow_list;
};

int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);

unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma);
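
/*
 * Illustrative sketch of translating a guest graphics memory address and
 * checking for failure, assuming the translation reports errors via
 * INTEL_GVT_INVALID_ADDR. The helper name and error value are assumptions
 * for this example only.
 */
#if 0
static int example_translate_gma(struct intel_vgpu_mm *mm, unsigned long gma,
				 unsigned long *gpa)
{
	unsigned long addr = intel_vgpu_gma_to_gpa(mm, gma);

	if (addr == INTEL_GVT_INVALID_ADDR)
		return -EFAULT;

	*gpa = addr;
	return 0;
}
#endif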

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[]);

struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);

int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
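
/*
 * Illustrative sketch of a lookup-or-create / pin / use / unpin / release
 * sequence for a PPGTT mm, assuming the get path reports failure with an
 * ERR_PTR and that a pinned mm is unpinned before its reference is
 * dropped. The helper name is an assumption for this example only.
 */
#if 0
static int example_use_ppgtt(struct intel_vgpu *vgpu,
			     enum intel_gvt_gtt_type root_entry_type,
			     u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	int ret;

	mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	ret = intel_vgpu_pin_mm(mm);	/* keep shadow page tables resident */
	if (ret)
		goto out_put;

	/* ... submit work that uses this PPGTT ... */

	intel_vgpu_unpin_mm(mm);
out_put:
	intel_vgpu_mm_put(mm);
	return ret;
}
#endif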

int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
void intel_gvt_restore_ggtt(struct intel_gvt *gvt);

#endif /* _GVT_GTT_H_ */