// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 * Author: Mostafa Saleh <smostafa@google.com>
 */
#include <nvhe/pviommu.h>
#include <nvhe/pviommu-host.h>

struct pviommu_host pviommus[MAX_NR_PVIOMMU];
static DEFINE_HYP_SPINLOCK(host_pviommu_lock);

/*
 * Attach a new pvIOMMU instance to VM host_kvm, and assign
 * pviommu as an ID to it.
 */
int pkvm_pviommu_attach(struct kvm *host_kvm, int pviommu)
{
	int i, ret = -EBUSY;

	if (!host_kvm)
		return -EINVAL;
	hyp_spin_lock(&host_pviommu_lock);
	for (i = 0 ; i < MAX_NR_PVIOMMU ; ++i) {
		struct pviommu_host *ph = &pviommus[i];

		if (!ph->kvm && !ph->finalized) {
			ph->kvm = kern_hyp_va(host_kvm);
			ph->pviommu_id = pviommu;
			ret = 0;
			break;
		}
	}
	hyp_spin_unlock(&host_pviommu_lock);
	return ret;
}
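
/*
 * Added usage sketch (not part of the original source): the host reserves
 * a free slot for a VM before it is finalised; host_kvm and PVIOMMU_ID
 * below are placeholders.
 *
 *	ret = pkvm_pviommu_attach(host_kvm, PVIOMMU_ID);
 *	if (ret)
 *		return ret;
 *
 * ret is -EINVAL for a NULL VM and -EBUSY when all MAX_NR_PVIOMMU slots
 * are already in use.
 */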

/*
 * Although a 1:many vsid:psid relation might have valid use cases, it
 * complicates the hypervisor interface when dealing with attach/detach
 * hypercalls.
 * So, for now we constrain a vsid to map to exactly one psid.
 * The other way around is allowed (many:1 vsid:psid). However, that is
 * likely less common, as changes to one of the vsids would be reflected
 * in the others since they share the same psid.
 */
static int __pkvm_pviommu_vsid_valid(struct pviommu_host *ph, u32 vsid)
{
	int i;

	for (i = 0 ; i < ph->nr_entries ; ++i) {
		if (ph->entries[i].vsid == vsid)
			return -EEXIST;
	}
	return 0;
}
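
/*
 * Added example (not part of the original source) of the constraint above,
 * for a pvIOMMU that is already attached; kvm, id and smmu are placeholders:
 *
 *	pkvm_pviommu_add_vsid(kvm, id, smmu, 0x10, 0x1);   returns 0
 *	pkvm_pviommu_add_vsid(kvm, id, smmu, 0x20, 0x1);   returns -EEXIST (vsid 0x1 reused)
 *	pkvm_pviommu_add_vsid(kvm, id, smmu, 0x10, 0x2);   returns 0 (many:1 vsid:sid is fine)
 */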

/*
 * For a pvIOMMU with ID pviommu that is attached to host_kvm, add a
 * new entry mapping the virtual sid (vsid) to the physical IOMMU
 * (iommu) and physical sid (sid).
 */
int pkvm_pviommu_add_vsid(struct kvm *host_kvm, int pviommu,
			  pkvm_handle_t iommu, u32 sid, u32 vsid)
{
	int i;
	int ret = -ENOENT;

	hyp_spin_lock(&host_pviommu_lock);
	for (i = 0 ; i < MAX_NR_PVIOMMU ; ++i) {
		struct pviommu_host *ph = &pviommus[i];

		if (!ph->kvm || ph->finalized)
			continue;
		if (ph->pviommu_id == pviommu && ph->kvm == kern_hyp_va(host_kvm)) {
			if (ph->nr_entries == MAX_NR_SID_PER_PVIOMMU) {
				ret = -EBUSY;
				break;
			}
			ret = __pkvm_pviommu_vsid_valid(ph, vsid);
			if (ret)
				break;

			ph->entries[ph->nr_entries].sid = sid;
			ph->entries[ph->nr_entries].vsid = vsid;
			ph->entries[ph->nr_entries].iommu = iommu;
			ph->nr_entries++;
			break;
		}
	}
	hyp_spin_unlock(&host_pviommu_lock);
	return ret;
}
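
/*
 * Added flow sketch (not part of the original source): taken together with
 * pkvm_pviommu_attach(), the host-side setup before VM init looks roughly
 * like this (host_kvm, PVIOMMU_ID and smmu are placeholders):
 *
 *	ret = pkvm_pviommu_attach(host_kvm, PVIOMMU_ID);
 *	if (!ret)
 *		ret = pkvm_pviommu_add_vsid(host_kvm, PVIOMMU_ID, smmu, sid, vsid);
 */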

/*
 * Called at VM init; adds all the pvIOMMUs belonging to the VM
 * to a list. No further changes from the host are allowed to any
 * of those pvIOMMUs.
 */
int pkvm_pviommu_finalise(struct pkvm_hyp_vm *hyp_vm)
{
	int i;
	int ret;

	ret = hyp_pool_init_empty(&hyp_vm->iommu_pool, 64);
	if (ret)
		return ret;

	hyp_spin_lock(&host_pviommu_lock);
	INIT_LIST_HEAD(&hyp_vm->pviommus);
	INIT_LIST_HEAD(&hyp_vm->domains);
	for (i = 0; i < MAX_NR_PVIOMMU ; ++i) {
		struct pviommu_host *ph = &pviommus[i];

		if (ph->kvm == hyp_vm->host_kvm) {
			ph->finalized = true;
			list_add_tail(&ph->list, &hyp_vm->pviommus);
		}
	}
	hyp_spin_unlock(&host_pviommu_lock);
	return 0;
}
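
/*
 * Added note (not part of the original source): once finalized, the instance
 * is skipped by both pkvm_pviommu_attach() and pkvm_pviommu_add_vsid(), e.g.
 *
 *	pkvm_pviommu_add_vsid(host_kvm, PVIOMMU_ID, smmu, sid, vsid);
 *
 * now returns -ENOENT, and the slot stays reserved until
 * pkvm_pviommu_teardown() clears the finalized flag.
 */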

/*
 * Called when the VM is torn down, to free its pvIOMMU instances and
 * clean up any state.
 */
void pkvm_pviommu_teardown(struct pkvm_hyp_vm *hyp_vm)
{
	struct pviommu_host *ph;

	hyp_spin_lock(&host_pviommu_lock);
	list_for_each_entry(ph, &hyp_vm->pviommus, list) {
		/* pvIOMMU is free now. */
		ph->kvm = NULL;
		ph->nr_entries = 0;
		ph->finalized = false;
	}
	kvm_iommu_teardown_guest_domains(hyp_vm);
	hyp_spin_unlock(&host_pviommu_lock);
}
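
/*
 * Added summary (not part of the original source): resolve the (pviommu,
 * vsid) pair of hyp_vm into its physical route, filling *route with the
 * physical IOMMU handle and sid. Returns -ENOENT when either the pvIOMMU
 * or the vsid is unknown.
 */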
int pkvm_pviommu_get_route(struct pkvm_hyp_vm *hyp_vm, pkvm_handle_t pviommu, u32 vsid,
			   struct pviommu_route *route)
{
	struct pviommu_host *ph;
	int i;

	list_for_each_entry(ph, &hyp_vm->pviommus, list) {
		if (ph->pviommu_id == pviommu) {
			for (i = 0 ; i < ph->nr_entries ; ++i) {
				if (ph->entries[i].vsid == vsid) {
					route->sid = ph->entries[i].sid;
					route->iommu = ph->entries[i].iommu;
					return 0;
				}
			}
			break;
		}
	}
	return -ENOENT;
}