/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/mmu.h>

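/* Drop a reference on a channel group's engine context.  On the final put,
 * the backing context object is destroyed and the context is removed from
 * the group's list.  The caller's pointer is cleared either way.
 */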
static void
nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
{
	struct nvkm_ectx *ectx = *pectx;

	if (ectx) {
		struct nvkm_engn *engn = ectx->engn;

		if (refcount_dec_and_test(&ectx->refs)) {
			CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_object_del(&ectx->object);
			list_del(&ectx->head);
			kfree(ectx);
		}

		*pectx = NULL;
	}
}

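/* Look up the engine context for 'engn' in the channel group, taking an
 * extra reference if one already exists.  Otherwise, allocate a fresh one,
 * add it to the group's list, and construct its HW context object.  On
 * failure, the partially-constructed context is released again.
 */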
static int
nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
		   struct nvkm_chan *chan, struct nvkm_client *client)
{
	struct nvkm_engine *engine = engn->engine;
	struct nvkm_oclass cclass = {
		.client = client,
		.engine = engine,
	};
	struct nvkm_ectx *ectx;
	int ret = 0;

	/* Look for an existing context for this engine in the channel group. */
	ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
	if (ectx) {
		refcount_inc(&ectx->refs);
		*pectx = ectx;
		return 0;
	}

	/* Nope - create a fresh one. */
	CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(ectx = *pectx = kzalloc(sizeof(*ectx), GFP_KERNEL)))
		return -ENOMEM;

	ectx->engn = engn;
	refcount_set(&ectx->refs, 1);
	refcount_set(&ectx->uses, 0);
	list_add_tail(&ectx->head, &cgrp->ectxs);

	/* Allocate the HW structures. */
	if (engine->func->fifo.cclass)
		ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
	else if (engine->func->cclass)
		ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);

	if (ret)
		nvkm_cgrp_ectx_put(cgrp, pectx);

	return ret;
}

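/* Drop a reference on a sub-context (engine context + VMM pairing).  On the
 * final put, the VMA and instance memory are freed, the parent engine
 * context reference is dropped, and the VMM's engine refcount is balanced
 * before the VMM reference itself is released.
 */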
void
nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
{
	struct nvkm_vctx *vctx = *pvctx;

	if (vctx) {
		struct nvkm_engn *engn = vctx->ectx->engn;

		if (refcount_dec_and_test(&vctx->refs)) {
			CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_vmm_put(vctx->vmm, &vctx->vma);
			nvkm_gpuobj_del(&vctx->inst);

			nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
			if (vctx->vmm) {
				atomic_dec(&vctx->vmm->engref[engn->engine->subdev.type]);
				nvkm_vmm_unref(&vctx->vmm);
			}
			list_del(&vctx->head);
			kfree(vctx);
		}

		*pvctx = NULL;
	}
}

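/* Look up the sub-context matching this engine and the channel's VMM,
 * creating it (and its parent engine context) if it doesn't exist yet.
 * If the engine provides the hooks, a newly-created sub-context has its
 * instance memory bound and its engine-specific constructor run before
 * being returned; failures tear the sub-context back down.
 */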
int
nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_chan *chan,
		   struct nvkm_vctx **pvctx, struct nvkm_client *client)
{
	struct nvkm_ectx *ectx;
	struct nvkm_vctx *vctx;
	int ret;

	/* Look for an existing sub-context for this engine+VEID in the channel group. */
	vctx = nvkm_list_find(vctx, &cgrp->vctxs, head,
			      vctx->ectx->engn == engn && vctx->vmm == chan->vmm);
	if (vctx) {
		refcount_inc(&vctx->refs);
		*pvctx = vctx;
		return 0;
	}

	/* Nope - create a fresh one.  But, context first. */
	ret = nvkm_cgrp_ectx_get(cgrp, engn, &ectx, chan, client);
	if (ret) {
		CGRP_ERROR(cgrp, "ectx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		return ret;
	}

	/* Now, create the sub-context. */
	CGRP_TRACE(cgrp, "ctor vctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(vctx = *pvctx = kzalloc(sizeof(*vctx), GFP_KERNEL))) {
		nvkm_cgrp_ectx_put(cgrp, &ectx);
		return -ENOMEM;
	}

	vctx->ectx = ectx;
	vctx->vmm = nvkm_vmm_ref(chan->vmm);
	refcount_set(&vctx->refs, 1);
	list_add_tail(&vctx->head, &cgrp->vctxs);

	/* MMU on some GPUs needs to know engine usage for TLB invalidation. */
	if (vctx->vmm)
		atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);

	/* Allocate the HW structures. */
	if (engn->func->bind) {
		ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
		if (ret == 0 && engn->func->ctor)
			ret = engn->func->ctor(engn, vctx);
	}

	if (ret)
		nvkm_cgrp_vctx_put(cgrp, pvctx);

	return ret;
}

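/* Final destructor, called when the last kref on the channel group drops. */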
static void
nvkm_cgrp_del(struct kref *kref)
{
	struct nvkm_cgrp *cgrp = container_of(kref, typeof(*cgrp), kref);
	struct nvkm_runl *runl = cgrp->runl;

	if (runl->cgid)
		nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);

	mutex_destroy(&cgrp->mutex);
	nvkm_vmm_unref(&cgrp->vmm);
	kfree(cgrp);
}

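/* Release a channel group reference and clear the caller's pointer. */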
void
nvkm_cgrp_unref(struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	kref_put(&cgrp->kref, nvkm_cgrp_del);
	*pcgrp = NULL;
}

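/* Take an additional reference on a (possibly NULL) channel group. */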
struct nvkm_cgrp *
nvkm_cgrp_ref(struct nvkm_cgrp *cgrp)
{
	if (cgrp)
		kref_get(&cgrp->kref);

	return cgrp;
}

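/* Release a held channel group: clear the caller's pointer and drop the
 * group's spinlock with the IRQ flags that were saved when it was taken.
 */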
void
nvkm_cgrp_put(struct nvkm_cgrp **pcgrp, unsigned long irqflags)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	*pcgrp = NULL;
	spin_unlock_irqrestore(&cgrp->lock, irqflags);
}

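/* Allocate and initialise a channel group on the given runlist, taking a
 * reference on the VMM and reserving a channel group ID where the runlist
 * provides an ID allocator.
 */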
int
nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bool hw,
	      struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp;

	if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))
		return -ENOMEM;

	cgrp->func = runl->fifo->func->cgrp.func;
	strscpy(cgrp->name, name, sizeof(cgrp->name));
	cgrp->runl = runl;
	cgrp->vmm = nvkm_vmm_ref(vmm);
	cgrp->hw = hw;
	cgrp->id = -1;
	kref_init(&cgrp->kref);
	INIT_LIST_HEAD(&cgrp->chans);
	cgrp->chan_nr = 0;
	spin_lock_init(&cgrp->lock);
	INIT_LIST_HEAD(&cgrp->ectxs);
	INIT_LIST_HEAD(&cgrp->vctxs);
	mutex_init(&cgrp->mutex);
	atomic_set(&cgrp->rc, NVKM_CGRP_RC_NONE);

	if (runl->cgid) {
		cgrp->id = nvkm_chid_get(runl->cgid, cgrp);
		if (cgrp->id < 0) {
			RUNL_ERROR(runl, "!cgids");
			nvkm_cgrp_unref(pcgrp);
			return -ENOSPC;
		}
	}

	return 0;
}