• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-2024 Intel Corporation
4  */
5 
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_ipc.h"
9 #include "ivpu_jsm_msg.h"
10 #include "ivpu_pm.h"
11 #include "vpu_jsm_api.h"
12 
ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)13 const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
14 {
15 	#define IVPU_CASE_TO_STR(x) case x: return #x
16 	switch (type) {
17 	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
18 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
19 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
20 	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
21 	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
22 	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
23 	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
24 	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
25 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
26 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
27 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
28 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
29 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
30 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
31 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
32 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
33 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
34 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
35 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
36 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
37 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
38 	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
39 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
40 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
41 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
42 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
43 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
44 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
45 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
46 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
47 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
48 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
49 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
50 	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
51 	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
52 	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
53 	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
54 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
55 	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
56 	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
57 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
58 	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
59 	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
60 	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
61 	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
62 	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
63 	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
64 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
65 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
66 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
67 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
68 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
69 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
70 	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
71 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
72 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
73 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
74 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
75 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
76 	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
77 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
78 	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
79 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
80 	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
81 	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
82 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
83 	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
84 	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
85 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
86 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
87 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
88 	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
89 	}
90 	#undef IVPU_CASE_TO_STR
91 
92 	return "Unknown JSM message type";
93 }
94 
/*
 * ivpu_jsm_register_db() - Register a doorbell with the firmware.
 * @vdev: ivpu device
 * @ctx_id: host SSID owning the doorbell
 * @db_id: doorbell index to register
 * @jobq_base: base address of the job queue backing the doorbell
 * @jobq_size: size of that job queue
 *
 * Sends VPU_JSM_MSG_REGISTER_DB and waits for the DONE response.
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.host_ssid = ctx_id;
	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}
114 
/*
 * ivpu_jsm_unregister_db() - Unregister a previously registered doorbell.
 * @vdev: ivpu device
 * @db_id: doorbell index to unregister
 *
 * Failure is only warned about (rate-limited), not escalated.
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}
130 
/*
 * ivpu_jsm_get_heartbeat() - Query an engine's heartbeat counter.
 * @vdev: ivpu device
 * @engine: engine index; only VPU_ENGINE_COMPUTE is accepted
 * @heartbeat: output, heartbeat value reported by the firmware
 *
 * Return: 0 on success, -EINVAL for an unsupported engine, or a negative
 * error code if the IPC exchange fails.
 */
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return 0;
}
153 
/*
 * ivpu_jsm_reset_engine() - Request a firmware-side engine reset.
 * @vdev: ivpu device
 * @engine: engine index; only VPU_ENGINE_COMPUTE is accepted
 *
 * If the reset request itself fails, device recovery is triggered since the
 * firmware is assumed unhealthy at that point.
 * Return: 0 on success, -EINVAL for an unsupported engine, negative error
 * code on IPC failure.
 */
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (!ret)
		return 0;

	ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
	ivpu_pm_trigger_recovery(vdev, "Engine reset failed");

	return ret;
}
174 
/*
 * ivpu_jsm_preempt_engine() - Ask the firmware to preempt an engine.
 * @vdev: ivpu device
 * @engine: engine index; only VPU_ENGINE_COMPUTE is accepted
 * @preempt_id: identifier associated with this preemption request
 *
 * Return: 0 on success, -EINVAL for an unsupported engine, negative error
 * code on IPC failure.
 */
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_preempt.preempt_id = preempt_id;
	req.payload.engine_preempt.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}
194 
/*
 * ivpu_jsm_dyndbg_control() - Forward a dynamic-debug command to firmware.
 * @vdev: ivpu device
 * @command: NUL-terminated command string to send
 * @size: size of @command's buffer
 *
 * NOTE(review): @size is currently unused; the copy is bounded by
 * VPU_DYNDBG_CMD_MAX_LEN and strscpy() guarantees NUL termination —
 * confirm callers always pass NUL-terminated strings.
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}
211 
/*
 * ivpu_jsm_trace_get_capability() - Read the firmware trace capabilities.
 * @vdev: ivpu device
 * @trace_destination_mask: output, supported trace destinations
 * @trace_hw_component_mask: output, traceable HW components
 *
 * Outputs are written only on success.
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return 0;
}
231 
/*
 * ivpu_jsm_trace_set_config() - Push a trace configuration to the firmware.
 * @vdev: ivpu device
 * @trace_level: requested trace verbosity
 * @trace_destination_mask: destinations to route trace output to
 * @trace_hw_component_mask: HW components to trace
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_level = trace_level;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}
250 
/*
 * ivpu_jsm_context_release() - Tell the firmware a host context is gone.
 * @vdev: ivpu device
 * @host_ssid: SSID of the context being released
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}
266 
ivpu_jsm_pwr_d0i3_enter(struct ivpu_device * vdev)267 int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
268 {
269 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
270 	struct vpu_jsm_msg resp;
271 	int ret;
272 
273 	if (IVPU_WA(disable_d0i3_msg))
274 		return 0;
275 
276 	req.payload.pwr_d0i3_enter.send_response = 1;
277 
278 	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
279 					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
280 	if (ret)
281 		return ret;
282 
283 	return ivpu_hw_wait_for_idle(vdev);
284 }
285 
/*
 * ivpu_jsm_hws_create_cmdq() - Create a HW-scheduler command queue.
 * @vdev: ivpu device
 * @ctx_id: host SSID owning the queue
 * @cmdq_group: scheduling group for the queue
 * @cmdq_id: identifier of the new queue
 * @pid: process id associated with the queue
 * @engine: engine the queue targets
 * @cmdq_base: base address of the queue memory
 * @cmdq_size: size of the queue memory
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}
308 
/*
 * ivpu_jsm_hws_destroy_cmdq() - Destroy a HW-scheduler command queue.
 * @vdev: ivpu device
 * @ctx_id: host SSID owning the queue
 * @cmdq_id: identifier of the queue to destroy
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}
325 
/*
 * ivpu_jsm_hws_register_db() - Register a doorbell in HW-scheduler mode.
 * @vdev: ivpu device
 * @ctx_id: host SSID owning the doorbell
 * @cmdq_id: command queue the doorbell is attached to
 * @db_id: doorbell index to register
 * @cmdq_base: base address of the command queue memory
 * @cmdq_size: size of the command queue memory
 *
 * NOTE(review): sends VPU_JSM_MSG_HWS_REGISTER_DB but waits for the plain
 * VPU_JSM_MSG_REGISTER_DB_DONE response — confirm this matches the JSM API.
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}
346 
/*
 * ivpu_jsm_hws_resume_engine() - Resume an engine under the HW scheduler.
 * @vdev: ivpu device
 * @engine: engine index; only VPU_ENGINE_COMPUTE is accepted
 *
 * Triggers device recovery if the resume request fails.
 * Return: 0 on success, -EINVAL for an unsupported engine, negative error
 * code on IPC failure.
 */
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (!ret)
		return 0;

	ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
	ivpu_pm_trigger_recovery(vdev, "Engine resume failed");

	return ret;
}
367 
/*
 * ivpu_jsm_hws_set_context_sched_properties() - Configure scheduling for a
 * context's command queue.
 * @vdev: ivpu device
 * @ctx_id: host SSID of the context
 * @cmdq_id: command queue to configure
 * @priority: priority band to assign
 *
 * Realtime and in-process priorities are fixed at 0; context quantum and
 * same-priority grace period use fixed values (20000/10000 — units not
 * stated here, presumably microseconds; confirm against the JSM API).
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}
391 
/*
 * ivpu_jsm_hws_set_scheduling_log() - Point the HW scheduler at a log buffer.
 * @vdev: ivpu device
 * @engine_idx: engine the log applies to
 * @host_ssid: host SSID owning the log buffer
 * @vpu_log_buffer_va: VPU virtual address of the log buffer
 *
 * Extra scheduling-log events are enabled only when the
 * IVPU_TEST_MODE_HWS_EXTRA_EVENTS test-mode bit is set.
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;
	req.payload.hws_set_scheduling_log.enable_extra_events =
		ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}
413 
ivpu_jsm_hws_setup_priority_bands(struct ivpu_device * vdev)414 int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
415 {
416 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
417 	struct vpu_jsm_msg resp;
418 	struct ivpu_hw_info *hw = vdev->hw;
419 	struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
420 		&req.payload.hws_priority_band_setup;
421 	int ret;
422 
423 	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
424 	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
425 		setup->grace_period[band] = hw->hws.grace_period[band];
426 		setup->process_grace_period[band] = hw->hws.process_grace_period[band];
427 		setup->process_quantum[band] = hw->hws.process_quantum[band];
428 	}
429 	setup->normal_band_percentage = 10;
430 
431 	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
432 					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
433 	if (ret)
434 		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
435 
436 	return ret;
437 }
438 
/*
 * ivpu_jsm_metric_streamer_start() - Start the firmware metric streamer.
 * @vdev: ivpu device
 * @metric_group_mask: bitmask of metric groups to stream
 * @sampling_rate: sampling rate requested from the firmware
 * @buffer_addr: address of the destination buffer
 * @buffer_size: size of the destination buffer
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	/* The error branch previously duplicated the final return; fold it. */
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);

	return ret;
}
460 
/*
 * ivpu_jsm_metric_streamer_stop() - Stop the firmware metric streamer.
 * @vdev: ivpu device
 * @metric_group_mask: bitmask of metric groups to stop streaming
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}
476 
/*
 * ivpu_jsm_metric_streamer_update() - Flush streamed metrics into a buffer.
 * @vdev: ivpu device
 * @metric_group_mask: bitmask of metric groups to update
 * @buffer_addr: address of the destination buffer
 * @buffer_size: size of the destination buffer; 0 skips the overflow check
 * @bytes_written: output, number of bytes the firmware reported writing
 *
 * Return: 0 on success, -EOVERFLOW if the firmware claims to have written
 * more than @buffer_size, or a negative error code on IPC failure.
 */
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	u64 written;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	written = resp.payload.metric_streamer_done.bytes_written;

	/* Sanity-check the firmware's answer against the buffer we gave it. */
	if (buffer_size && written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = written;

	return 0;
}
505 
/*
 * ivpu_jsm_metric_streamer_info() - Query metric streamer sample/info sizes.
 * @vdev: ivpu device
 * @metric_group_mask: bitmask of metric groups to query
 * @buffer_addr: address of the info buffer
 * @buffer_size: size of the info buffer
 * @sample_size: optional output, per-sample size reported by the firmware
 * @info_size: optional output, bytes of info data written by the firmware
 *
 * NOTE(review): the request fills the metric_streamer_start payload fields —
 * presumably the INFO message shares that union layout; confirm against the
 * JSM API definition.
 *
 * Return: 0 on success, -EBADMSG if the firmware reports a zero sample size,
 * or a negative error code on IPC failure.
 */
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	/* Both outputs are optional; write only those the caller asked for. */
	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return 0;
}
536 
/*
 * ivpu_jsm_dct_enable() - Enable duty-cycle throttling in the firmware.
 * @vdev: ivpu device
 * @active_us: active time of the duty cycle, in microseconds
 * @inactive_us: inactive time of the duty cycle, in microseconds
 *
 * Return: 0 on success, negative error code on IPC failure.
 */
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg resp;
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
548 
ivpu_jsm_dct_disable(struct ivpu_device * vdev)549 int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
550 {
551 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
552 	struct vpu_jsm_msg resp;
553 
554 	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
555 					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
556 }
557 
ivpu_jsm_state_dump(struct ivpu_device * vdev)558 int ivpu_jsm_state_dump(struct ivpu_device *vdev)
559 {
560 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
561 
562 	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
563 				      vdev->timeout.state_dump_msg);
564 }
565