// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

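/*
 * hl_ctx_fini - release the context object resources
 *
 * @ctx: pointer to the context structure
 *
 * Puts the pending CS fences, stops debug mode if this is the compute
 * context, and frees the context's VM, ASID and MMU resources. Called
 * only once the context's reference count has dropped to zero.
 */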
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that has finished we decrement it, and we won't arrive
	 * at this function unless the ref count is 0
	 */

	for (i = 0; i < HL_MAX_PENDING_CS; i++)
		dma_fence_put(ctx->cs_pending[i]);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		/* The engines are stopped as there is no executing CS, but
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context
		 */
		if (hdev->in_debug && hdev->compute_ctx == ctx)
			hl_device_set_debug_mode(hdev, false);

		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
	} else {
		hl_mmu_ctx_fini(ctx);
	}
}

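/*
 * hl_ctx_do_release - final release of a context
 *
 * @ref: pointer to the context's embedded kref
 *
 * kref release callback. Finalizes the context, drops the reference the
 * context holds on its owning hpriv (if any) and frees the object.
 */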
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}

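/*
 * hl_ctx_create - create a new user context
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the calling process
 *
 * Allocates a context object, registers it in the process's context
 * manager IDR and initializes it. Takes a reference on hpriv for the
 * lifetime of the context.
 */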
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->compute_ctx = ctx;

	return 0;

remove_from_idr:
	mutex_lock(&mgr->ctx_lock);
	idr_remove(&mgr->ctx_handles, ctx->handle);
	mutex_unlock(&mgr->ctx_lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

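/*
 * hl_ctx_free - drop a reference on a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 *
 * Puts the context's reference count. If the context was not released,
 * i.e. some of its CS are still executing, a warning is printed.
 */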
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;

	dev_warn(hdev->dev,
		"Context %d closed or terminated but its CS are still executing\n",
		ctx->asid);
}

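/*
 * hl_ctx_init - initialize a context object
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true if this is the kernel driver's context
 *
 * Initializes the CS bookkeeping of the context. The kernel context gets
 * the reserved ASID 0 and only initializes its MMU mappings; a user
 * context allocates a free ASID and initializes its VM module as well.
 */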
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_mmu_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mmu ctx module\n");
			goto mem_ctx_err;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			return -ENOMEM;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto mem_ctx_err;
		}
	}

	return 0;

mem_ctx_err:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);

	return rc;
}

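/*
 * hl_ctx_get - increment the context reference count
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 */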
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

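/*
 * hl_ctx_put - decrement the context reference count
 *
 * @ctx: pointer to the context structure
 *
 * Returns 1 if the context was released as a result of this call,
 * 0 otherwise.
 */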
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

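/*
 * hl_ctx_get_fence - get the fence of a command submission
 *
 * @ctx: pointer to the context structure
 * @seq: CS sequence number to wait on
 *
 * Returns a reference to the fence of the CS with the given sequence
 * number, NULL if that CS has already completed and its fence was
 * recycled out of the pending array, or an ERR_PTR if the sequence
 * number has not been submitted yet.
 */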
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_device *hdev = ctx->hdev;
	struct dma_fence *fence;

	spin_lock(&ctx->cs_lock);

	if (seq >= ctx->cs_sequence) {
		dev_notice_ratelimited(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return NULL;
	}

	fence = dma_fence_get(
			ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}