1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "intel_guc_ct.h"
8 #include "gt/intel_gt.h"
9 
10 #define CT_ERROR(_ct, _fmt, ...) \
11 	DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
12 #ifdef CONFIG_DRM_I915_DEBUG_GUC
13 #define CT_DEBUG(_ct, _fmt, ...) \
14 	DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
15 #else
16 #define CT_DEBUG(...)	do { } while (0)
17 #endif
18 
19 struct ct_request {
20 	struct list_head link;
21 	u32 fence;
22 	u32 status;
23 	u32 response_len;
24 	u32 *response_buf;
25 };
26 
27 struct ct_incoming_request {
28 	struct list_head link;
29 	u32 msg[];
30 };
31 
32 enum { CTB_SEND = 0, CTB_RECV = 1 };
33 
34 enum { CTB_OWNER_HOST = 0 };
35 
36 static void ct_incoming_request_worker_func(struct work_struct *w);
37 
38 /**
39  * intel_guc_ct_init_early - Initialize CT state without requiring device access
40  * @ct: pointer to CT struct
41  */
42 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
43 {
44 	spin_lock_init(&ct->requests.lock);
45 	INIT_LIST_HEAD(&ct->requests.pending);
46 	INIT_LIST_HEAD(&ct->requests.incoming);
47 	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
48 }
49 
50 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
51 {
52 	return container_of(ct, struct intel_guc, ct);
53 }
54 
55 static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
56 {
57 	return guc_to_gt(ct_to_guc(ct));
58 }
59 
60 static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
61 {
62 	return ct_to_gt(ct)->i915;
63 }
64 
65 static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
66 {
67 	return ct_to_i915(ct)->drm.dev;
68 }
69 
70 static inline const char *guc_ct_buffer_type_to_str(u32 type)
71 {
72 	switch (type) {
73 	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
74 		return "SEND";
75 	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
76 		return "RECV";
77 	default:
78 		return "<invalid>";
79 	}
80 }
81 
82 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
83 				    u32 cmds_addr, u32 size)
84 {
85 	memset(desc, 0, sizeof(*desc));
86 	desc->addr = cmds_addr;
87 	desc->size = size;
88 	desc->owner = CTB_OWNER_HOST;
89 }
90 
91 static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
92 {
93 	desc->head = 0;
94 	desc->tail = 0;
95 	desc->is_in_error = 0;
96 }
97 
98 static int guc_action_register_ct_buffer(struct intel_guc *guc,
99 					 u32 desc_addr,
100 					 u32 type)
101 {
102 	u32 action[] = {
103 		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
104 		desc_addr,
105 		sizeof(struct guc_ct_buffer_desc),
106 		type
107 	};
108 
109 	/* Can't use generic send(), CT registration must go over MMIO */
110 	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
111 }
112 
113 static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
114 {
115 	int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
116 
117 	if (unlikely(err))
118 		CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
119 			 guc_ct_buffer_type_to_str(type), err);
120 	return err;
121 }
122 
123 static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
124 {
125 	u32 action[] = {
126 		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
127 		CTB_OWNER_HOST,
128 		type
129 	};
130 
131 	/* Can't use generic send(), CT deregistration must go over MMIO */
132 	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
133 }
134 
135 static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
136 {
137 	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
138 
139 	if (unlikely(err))
140 		CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
141 			 guc_ct_buffer_type_to_str(type), err);
142 	return err;
143 }
144 
145 /**
146  * intel_guc_ct_init - Init buffer-based communication
147  * @ct: pointer to CT struct
148  *
149  * Allocate memory required for buffer-based communication.
150  *
151  * Return: 0 on success, a negative errno code on failure.
152  */
153 int intel_guc_ct_init(struct intel_guc_ct *ct)
154 {
155 	struct intel_guc *guc = ct_to_guc(ct);
156 	void *blob;
157 	int err;
158 	int i;
159 
160 	GEM_BUG_ON(ct->vma);
161 
162 	/* We allocate 1 page to hold both descriptors and both buffers.
163 	 *       ___________.....................
164 	 *      |desc (SEND)|                   :
165 	 *      |___________|                   PAGE/4
166 	 *      :___________....................:
167 	 *      |desc (RECV)|                   :
168 	 *      |___________|                   PAGE/4
169 	 *      :_______________________________:
170 	 *      |cmds (SEND)                    |
171 	 *      |                               PAGE/4
172 	 *      |_______________________________|
173 	 *      |cmds (RECV)                    |
174 	 *      |                               PAGE/4
175 	 *      |_______________________________|
176 	 *
177 	 * Each message can use a maximum of 32 dwords and we don't expect to
178 	 * have more than 1 in flight at any time, so we have enough space.
179 	 * Some logic further ahead will rely on the fact that there is only 1
180 	 * page and that it is always mapped, so if the size is changed the
181 	 * other code will need updating as well.
182 	 */
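	/*
	 * Editor's illustration (not from the original source), assuming a
	 * 4 KiB page: the pointer arithmetic further below resolves to
	 *
	 *   ctbs[CTB_SEND].desc = blob + 0      ctbs[CTB_SEND].cmds = blob + 2048
	 *   ctbs[CTB_RECV].desc = blob + 1024   ctbs[CTB_RECV].cmds = blob + 3072
	 *
	 * i.e. the two descriptors sit in the first half of the page and the
	 * two command buffers fill the second half, PAGE_SIZE/4 bytes each.
	 */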
183 
184 	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
185 	if (unlikely(err)) {
186 		CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
187 		return err;
188 	}
189 
190 	CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
191 
192 	/* store pointers to desc and cmds */
193 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
194 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
195 		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
196 		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
197 	}
198 
199 	return 0;
200 }
201 
202 /**
203  * intel_guc_ct_fini - Fini buffer-based communication
204  * @ct: pointer to CT struct
205  *
206  * Deallocate memory required for buffer-based communication.
207  */
208 void intel_guc_ct_fini(struct intel_guc_ct *ct)
209 {
210 	GEM_BUG_ON(ct->enabled);
211 
212 	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
213 }
214 
215 /**
216  * intel_guc_ct_enable - Enable buffer based command transport.
217  * @ct: pointer to CT struct
218  *
219  * Return: 0 on success, a negative errno code on failure.
220  */
221 int intel_guc_ct_enable(struct intel_guc_ct *ct)
222 {
223 	struct intel_guc *guc = ct_to_guc(ct);
224 	u32 base, cmds, size;
225 	int err;
226 	int i;
227 
228 	GEM_BUG_ON(ct->enabled);
229 
230 	/* vma should already be allocated and mapped */
231 	GEM_BUG_ON(!ct->vma);
232 	base = intel_guc_ggtt_offset(guc, ct->vma);
233 
234 	/* (re)initialize descriptors
235 	 * cmds buffers are in the second half of the blob page
236 	 */
237 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
238 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
239 		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
240 		size = PAGE_SIZE / 4;
241 		CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
242 		guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
243 	}
244 
245 	/*
246 	 * Register both CT buffers starting with RECV buffer.
247 	 * Descriptors are in first half of the blob.
248 	 */
249 	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
250 				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
251 	if (unlikely(err))
252 		goto err_out;
253 
254 	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
255 				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
256 	if (unlikely(err))
257 		goto err_deregister;
258 
259 	ct->enabled = true;
260 
261 	return 0;
262 
263 err_deregister:
264 	ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
265 err_out:
266 	CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err);
267 	return err;
268 }
269 
270 /**
271  * intel_guc_ct_disable - Disable buffer based command transport.
272  * @ct: pointer to CT struct
273  */
274 void intel_guc_ct_disable(struct intel_guc_ct *ct)
275 {
276 	struct intel_guc *guc = ct_to_guc(ct);
277 
278 	GEM_BUG_ON(!ct->enabled);
279 
280 	ct->enabled = false;
281 
282 	if (intel_guc_is_fw_running(guc)) {
283 		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
284 		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
285 	}
286 }
287 
288 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
289 {
290 	/* For now it's trivial */
291 	return ++ct->requests.last_fence;
292 }
293 
294 /**
295  * DOC: CTB Host to GuC request
296  *
297  * Format of the CTB Host to GuC request message is as follows::
298  *
299  *      +------------+---------+---------+---------+---------+
300  *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
301  *      +------------+---------+---------+---------+---------+
302  *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
303  *      +   HEADER   +---------+---------+---------+---------+
304  *      |            |    0    |    1    |   ...   |    n    |
305  *      +============+=========+=========+=========+=========+
306  *      |  len >= 1  |  FENCE  |     request specific data   |
307  *      +------+-----+---------+---------+---------+---------+
308  *
309  *                   ^-----------------len-------------------^
310  */
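/*
 * Worked example (editor's illustration, not from the original source):
 * sending a hypothetical action 0x30 with a single parameter and no
 * response requested, i.e. action[] = { 0x30, 0xdeadbeef } and len == 2,
 * makes ct_write() below emit three dwords:
 *
 *      msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) |
 *               GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *               (0x30 << GUC_CT_MSG_ACTION_SHIFT)
 *      msg[1] = fence
 *      msg[2] = 0xdeadbeef
 *
 * so the header's len field counts the payload (fence plus request data)
 * while the action code itself travels in the header.
 */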
311 
312 static int ct_write(struct intel_guc_ct *ct,
313 		    const u32 *action,
314 		    u32 len /* in dwords */,
315 		    u32 fence,
316 		    bool want_response)
317 {
318 	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
319 	struct guc_ct_buffer_desc *desc = ctb->desc;
320 	u32 head = desc->head;
321 	u32 tail = desc->tail;
322 	u32 size = desc->size;
323 	u32 used;
324 	u32 header;
325 	u32 *cmds = ctb->cmds;
326 	unsigned int i;
327 
328 	if (unlikely(desc->is_in_error))
329 		return -EPIPE;
330 
331 	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
332 		     (tail | head) >= size))
333 		goto corrupted;
334 
335 	/* later calculations will be done in dwords */
336 	head /= 4;
337 	tail /= 4;
338 	size /= 4;
339 
340 	/*
341 	 * tail == head condition indicates empty. GuC FW does not support
342 	 * using up the entire buffer to get tail == head meaning full.
343 	 */
344 	if (tail < head)
345 		used = (size - head) + tail;
346 	else
347 		used = tail - head;
348 
349 	/* make sure there is space, including an extra dw for the fence */
350 	if (unlikely(used + len + 1 >= size))
351 		return -ENOSPC;
352 
353 	/*
354 	 * Write the message. The format is the following:
355 	 * DW0: header (including action code)
356 	 * DW1: fence
357 	 * DW2+: action data
358 	 */
359 	header = (len << GUC_CT_MSG_LEN_SHIFT) |
360 		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
361 		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
362 		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);
363 
364 	CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
365 		 4, &header, 4, &fence, 4 * (len - 1), &action[1]);
366 
367 	cmds[tail] = header;
368 	tail = (tail + 1) % size;
369 
370 	cmds[tail] = fence;
371 	tail = (tail + 1) % size;
372 
373 	for (i = 1; i < len; i++) {
374 		cmds[tail] = action[i];
375 		tail = (tail + 1) % size;
376 	}
377 	GEM_BUG_ON(tail > size);
378 
379 	/* now update desc tail (back in bytes) */
380 	desc->tail = tail * 4;
381 	return 0;
382 
383 corrupted:
384 	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
385 		 desc->addr, desc->head, desc->tail, desc->size);
386 	desc->is_in_error = 1;
387 	return -EPIPE;
388 }
389 
390 /**
391  * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
392  * @desc:	buffer descriptor
393  * @fence:	response fence
394  * @status:	placeholder for status
395  *
396  * GuC will update the CT buffer descriptor with a new fence and status
397  * after processing the command identified by the fence. Wait for the
398  * specified fence and then read the command's status from the
399  * descriptor.
400  *
401  * Return:
402  * *	0 response received (status is valid)
403  * *	-ETIMEDOUT no response within hardcoded timeout
404  * *	-EPROTO no response, CT buffer is in error
405  */
406 static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
407 				    u32 fence,
408 				    u32 *status)
409 {
410 	int err;
411 
412 	/*
413 	 * Fast commands should complete in less than 10us, so sample quickly
414 	 * up to that length of time, then switch to a slower sleep-wait loop.
415 	 * No GuC command should ever take longer than 10ms.
416 	 */
417 #define done (READ_ONCE(desc->fence) == fence)
418 	err = wait_for_us(done, 10);
419 	if (err)
420 		err = wait_for(done, 10);
421 #undef done
422 
423 	if (unlikely(err)) {
424 		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
425 			  fence, desc->fence);
426 
427 		if (WARN_ON(desc->is_in_error)) {
428 			/* Something went wrong with the messaging, try to reset
429 			 * the buffer and hope for the best
430 			 */
431 			guc_ct_buffer_desc_reset(desc);
432 			err = -EPROTO;
433 		}
434 	}
435 
436 	*status = desc->status;
437 	return err;
438 }
439 
440 /**
441  * wait_for_ct_request_update - Wait for CT request state update.
442  * @req:	pointer to pending request
443  * @status:	placeholder for status
444  *
445  * For each sent request, GuC shall send back a CT response message.
446  * Our message handler will update the status of the tracked request once
447  * a response message with the given fence is received. Wait here and
448  * check for a valid response status value.
449  *
450  * Return:
451  * *	0 response received (status is valid)
452  * *	-ETIMEDOUT no response within hardcoded timeout
453  */
454 static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
455 {
456 	int err;
457 
458 	/*
459 	 * Fast commands should complete in less than 10us, so sample quickly
460 	 * up to that length of time, then switch to a slower sleep-wait loop.
461 	 * No GuC command should ever take longer than 10ms.
462 	 */
463 #define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
464 	err = wait_for_us(done, 10);
465 	if (err)
466 		err = wait_for(done, 10);
467 #undef done
468 
469 	if (unlikely(err))
470 		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
471 
472 	*status = req->status;
473 	return err;
474 }
475 
476 static int ct_send(struct intel_guc_ct *ct,
477 		   const u32 *action,
478 		   u32 len,
479 		   u32 *response_buf,
480 		   u32 response_buf_size,
481 		   u32 *status)
482 {
483 	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
484 	struct guc_ct_buffer_desc *desc = ctb->desc;
485 	struct ct_request request;
486 	unsigned long flags;
487 	u32 fence;
488 	int err;
489 
490 	GEM_BUG_ON(!ct->enabled);
491 	GEM_BUG_ON(!len);
492 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
493 	GEM_BUG_ON(!response_buf && response_buf_size);
494 
495 	fence = ct_get_next_fence(ct);
496 	request.fence = fence;
497 	request.status = 0;
498 	request.response_len = response_buf_size;
499 	request.response_buf = response_buf;
500 
501 	spin_lock_irqsave(&ct->requests.lock, flags);
502 	list_add_tail(&request.link, &ct->requests.pending);
503 	spin_unlock_irqrestore(&ct->requests.lock, flags);
504 
505 	err = ct_write(ct, action, len, fence, !!response_buf);
506 	if (unlikely(err))
507 		goto unlink;
508 
509 	intel_guc_notify(ct_to_guc(ct));
510 
511 	if (response_buf)
512 		err = wait_for_ct_request_update(&request, status);
513 	else
514 		err = wait_for_ctb_desc_update(desc, fence, status);
515 	if (unlikely(err))
516 		goto unlink;
517 
518 	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
519 		err = -EIO;
520 		goto unlink;
521 	}
522 
523 	if (response_buf) {
524 		/* There shall be no data in the status */
525 		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
526 		/* Return actual response len */
527 		err = request.response_len;
528 	} else {
529 		/* There shall be no response payload */
530 		WARN_ON(request.response_len);
531 		/* Return data decoded from the status dword */
532 		err = INTEL_GUC_MSG_TO_DATA(*status);
533 	}
534 
535 unlink:
536 	spin_lock_irqsave(&ct->requests.lock, flags);
537 	list_del(&request.link);
538 	spin_unlock_irqrestore(&ct->requests.lock, flags);
539 
540 	return err;
541 }
542 
543 /*
544  * Command Transport (CT) buffer based GuC send function.
545  */
546 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
547 		      u32 *response_buf, u32 response_buf_size)
548 {
549 	struct intel_guc *guc = ct_to_guc(ct);
550 	u32 status = ~0; /* undefined */
551 	int ret;
552 
553 	if (unlikely(!ct->enabled)) {
554 		WARN(1, "Unexpected send: action=%#x\n", *action);
555 		return -ENODEV;
556 	}
557 
558 	mutex_lock(&guc->send_mutex);
559 
560 	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
561 	if (unlikely(ret < 0)) {
562 		CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
563 			 action[0], ret, status);
564 	} else if (unlikely(ret)) {
565 		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
566 			 action[0], ret, ret);
567 	}
568 
569 	mutex_unlock(&guc->send_mutex);
570 	return ret;
571 }
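/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * hypothetical caller sending one action dword plus one parameter and
 * collecting up to 8 dwords of response payload. A negative return means
 * the send failed; otherwise the return value is the response length in
 * dwords, as computed in ct_send() above.
 */
static inline int example_ct_send(struct intel_guc_ct *ct)
{
	u32 action[] = { 0x30 /* hypothetical action code */, 0xdeadbeef };
	u32 response[8];
	int ret;

	ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action),
				response, ARRAY_SIZE(response));
	if (ret < 0)
		return ret;

	/* ret dwords of response payload are now valid in response[] */
	return 0;
}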
572 
573 static inline unsigned int ct_header_get_len(u32 header)
574 {
575 	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
576 }
577 
578 static inline unsigned int ct_header_get_action(u32 header)
579 {
580 	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
581 }
582 
583 static inline bool ct_header_is_response(u32 header)
584 {
585 	return !!(header & GUC_CT_MSG_IS_RESPONSE);
586 }
587 
588 static int ct_read(struct intel_guc_ct *ct, u32 *data)
589 {
590 	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
591 	struct guc_ct_buffer_desc *desc = ctb->desc;
592 	u32 head = desc->head;
593 	u32 tail = desc->tail;
594 	u32 size = desc->size;
595 	u32 *cmds = ctb->cmds;
596 	s32 available;
597 	unsigned int len;
598 	unsigned int i;
599 
600 	if (unlikely(desc->is_in_error))
601 		return -EPIPE;
602 
603 	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
604 		     (tail | head) >= size))
605 		goto corrupted;
606 
607 	/* later calculations will be done in dwords */
608 	head /= 4;
609 	tail /= 4;
610 	size /= 4;
611 
612 	/* tail == head condition indicates empty */
613 	available = tail - head;
614 	if (unlikely(available == 0))
615 		return -ENODATA;
616 
617 	/* beware of buffer wrap case */
618 	if (unlikely(available < 0))
619 		available += size;
620 	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
621 	GEM_BUG_ON(available < 0);
622 
623 	data[0] = cmds[head];
624 	head = (head + 1) % size;
625 
626 	/* message len with header */
627 	len = ct_header_get_len(data[0]) + 1;
628 	if (unlikely(len > (u32)available)) {
629 		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
630 			 4, data,
631 			 4 * (head + available - 1 > size ?
632 			      size - head : available - 1), &cmds[head],
633 			 4 * (head + available - 1 > size ?
634 			      available - 1 - size + head : 0), &cmds[0]);
635 		goto corrupted;
636 	}
637 
638 	for (i = 1; i < len; i++) {
639 		data[i] = cmds[head];
640 		head = (head + 1) % size;
641 	}
642 	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
643 
644 	desc->head = head * 4;
645 	return 0;
646 
647 corrupted:
648 	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
649 		 desc->addr, desc->head, desc->tail, desc->size);
650 	desc->is_in_error = 1;
651 	return -EPIPE;
652 }
653 
654 /**
655  * DOC: CTB GuC to Host response
656  *
657  * Format of the CTB GuC to Host response message is as follows::
658  *
659  *      +------------+---------+---------+---------+---------+---------+
660  *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
661  *      +------------+---------+---------+---------+---------+---------+
662  *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
663  *      +   HEADER   +---------+---------+---------+---------+---------+
664  *      |            |    0    |    1    |    2    |   ...   |    n    |
665  *      +============+=========+=========+=========+=========+=========+
666  *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
667  *      +------+-----+---------+---------+---------+---------+---------+
668  *
669  *                   ^-----------------------len-----------------------^
670  */
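/*
 * Worked example (editor's illustration, not from the original source): a
 * success response carrying no extra payload arrives as three dwords, a
 * header (len == 2 with GUC_CT_MSG_IS_RESPONSE set), the FENCE of the
 * original request, and a STATUS dword for which INTEL_GUC_MSG_IS_RESPONSE()
 * holds; ct_handle_response() below then computes datalen = len - 2 = 0, so
 * only the status is copied back to the waiting request.
 */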
671 
672 static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
673 {
674 	u32 header = msg[0];
675 	u32 len = ct_header_get_len(header);
676 	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
677 	u32 fence;
678 	u32 status;
679 	u32 datalen;
680 	struct ct_request *req;
681 	bool found = false;
682 
683 	GEM_BUG_ON(!ct_header_is_response(header));
684 	GEM_BUG_ON(!in_irq());
685 
686 	/* Response payload shall at least include fence and status */
687 	if (unlikely(len < 2)) {
688 		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
689 		return -EPROTO;
690 	}
691 
692 	fence = msg[1];
693 	status = msg[2];
694 	datalen = len - 2;
695 
696 	/* Format of the status follows RESPONSE message */
697 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
698 		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
699 		return -EPROTO;
700 	}
701 
702 	CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
703 
704 	spin_lock(&ct->requests.lock);
705 	list_for_each_entry(req, &ct->requests.pending, link) {
706 		if (unlikely(fence != req->fence)) {
707 			CT_DEBUG(ct, "request %u awaits response\n",
708 				 req->fence);
709 			continue;
710 		}
711 		if (unlikely(datalen > req->response_len)) {
712 			CT_ERROR(ct, "Response for %u is too long %*ph\n",
713 				 req->fence, msgsize, msg);
714 			datalen = 0;
715 		}
716 		if (datalen)
717 			memcpy(req->response_buf, msg + 3, 4 * datalen);
718 		req->response_len = datalen;
719 		WRITE_ONCE(req->status, status);
720 		found = true;
721 		break;
722 	}
723 	spin_unlock(&ct->requests.lock);
724 
725 	if (!found)
726 		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
727 	return 0;
728 }
729 
730 static void ct_process_request(struct intel_guc_ct *ct,
731 			       u32 action, u32 len, const u32 *payload)
732 {
733 	struct intel_guc *guc = ct_to_guc(ct);
734 	int ret;
735 
736 	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
737 
738 	switch (action) {
739 	case INTEL_GUC_ACTION_DEFAULT:
740 		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
741 		if (unlikely(ret))
742 			goto fail_unexpected;
743 		break;
744 
745 	default:
746 fail_unexpected:
747 		CT_ERROR(ct, "Unexpected request %x %*ph\n",
748 			 action, 4 * len, payload);
749 		break;
750 	}
751 }
752 
753 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
754 {
755 	unsigned long flags;
756 	struct ct_incoming_request *request;
757 	u32 header;
758 	u32 *payload;
759 	bool done;
760 
761 	spin_lock_irqsave(&ct->requests.lock, flags);
762 	request = list_first_entry_or_null(&ct->requests.incoming,
763 					   struct ct_incoming_request, link);
764 	if (request)
765 		list_del(&request->link);
766 	done = !!list_empty(&ct->requests.incoming);
767 	spin_unlock_irqrestore(&ct->requests.lock, flags);
768 
769 	if (!request)
770 		return true;
771 
772 	header = request->msg[0];
773 	payload = &request->msg[1];
774 	ct_process_request(ct,
775 			   ct_header_get_action(header),
776 			   ct_header_get_len(header),
777 			   payload);
778 
779 	kfree(request);
780 	return done;
781 }
782 
783 static void ct_incoming_request_worker_func(struct work_struct *w)
784 {
785 	struct intel_guc_ct *ct =
786 		container_of(w, struct intel_guc_ct, requests.worker);
787 	bool done;
788 
789 	done = ct_process_incoming_requests(ct);
790 	if (!done)
791 		queue_work(system_unbound_wq, &ct->requests.worker);
792 }
793 
794 /**
795  * DOC: CTB GuC to Host request
796  *
797  * Format of the CTB GuC to Host request message is as follows::
798  *
799  *      +------------+---------+---------+---------+---------+---------+
800  *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
801  *      +------------+---------+---------+---------+---------+---------+
802  *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
803  *      +   HEADER   +---------+---------+---------+---------+---------+
804  *      |            |    0    |    1    |    2    |   ...   |    n    |
805  *      +============+=========+=========+=========+=========+=========+
806  *      |     len    |            request specific data                |
807  *      +------+-----+---------+---------+---------+---------+---------+
808  *
809  *                   ^-----------------------len-----------------------^
810  */
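/*
 * Worked example (editor's illustration, not from the original source): a
 * GuC notification with two dwords of payload has len == 2 in its header,
 * so ct_handle_request() below queues a copy of (len + 1) * 4 = 12 bytes
 * and the worker later hands msg[1..2] to ct_process_request(). Unlike the
 * Host to GuC direction, there is no fence dword in this message.
 */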
811 
812 static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
813 {
814 	u32 header = msg[0];
815 	u32 len = ct_header_get_len(header);
816 	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
817 	struct ct_incoming_request *request;
818 	unsigned long flags;
819 
820 	GEM_BUG_ON(ct_header_is_response(header));
821 
822 	request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
823 	if (unlikely(!request)) {
824 		CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
825 		return 0; /* XXX: -ENOMEM ? */
826 	}
827 	memcpy(request->msg, msg, msgsize);
828 
829 	spin_lock_irqsave(&ct->requests.lock, flags);
830 	list_add_tail(&request->link, &ct->requests.incoming);
831 	spin_unlock_irqrestore(&ct->requests.lock, flags);
832 
833 	queue_work(system_unbound_wq, &ct->requests.worker);
834 	return 0;
835 }
836 
837 /*
838  * When we're communicating with the GuC over CT, GuC uses events
839  * to notify us about new messages being posted on the RECV buffer.
840  */
841 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
842 {
843 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
844 	int err = 0;
845 
846 	if (unlikely(!ct->enabled)) {
847 		WARN(1, "Unexpected GuC event received while CT disabled!\n");
848 		return;
849 	}
850 
851 	do {
852 		err = ct_read(ct, msg);
853 		if (err)
854 			break;
855 
856 		if (ct_header_is_response(msg[0]))
857 			err = ct_handle_response(ct, msg);
858 		else
859 			err = ct_handle_request(ct, msg);
860 	} while (!err);
861 }
862