// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include "mtk-mdp3-vpu.h"
#include "mtk-mdp3-core.h"

#define MDP_VPU_MESSAGE_TIMEOUT 500U
#define vpu_alloc_size		0x600000

static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
{
	return container_of(vpu, struct mdp_dev, vpu);
}

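/*
 * Allocate the DMA-coherent work buffer (vpu_alloc_size bytes) from the SCP
 * device; it is shared with the MDP VPU firmware. Returns 0 immediately if
 * the buffer already exists.
 */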
static int mdp_vpu_shared_mem_alloc(struct mdp_vpu_dev *vpu)
{
	if (vpu->work && vpu->work_addr)
		return 0;

	vpu->work = dma_alloc_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
				       &vpu->work_addr, GFP_KERNEL);

	if (!vpu->work)
		return -ENOMEM;
	else
		return 0;
}

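/* Release the shared work buffer, if one was allocated. */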
void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu)
{
	if (vpu->work && vpu->work_addr)
		dma_free_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
				  vpu->work, vpu->work_addr);
}

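/*
 * IPI ack handlers: each runs when the SCP replies, records the firmware
 * status (and, for the init ack, the reported work-buffer size) and wakes
 * the sender waiting in mdp_vpu_sendmsg().
 */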
static void mdp_vpu_ipi_handle_init_ack(void *data, unsigned int len,
					void *priv)
{
	struct mdp_ipi_init_msg *msg = (struct mdp_ipi_init_msg *)data;
	struct mdp_vpu_dev *vpu =
		(struct mdp_vpu_dev *)(unsigned long)msg->drv_data;

	if (!vpu->work_size)
		vpu->work_size = msg->work_size;

	vpu->status = msg->status;
	complete(&vpu->ipi_acked);
}

static void mdp_vpu_ipi_handle_deinit_ack(void *data, unsigned int len,
					  void *priv)
{
	struct mdp_ipi_deinit_msg *msg = (struct mdp_ipi_deinit_msg *)data;
	struct mdp_vpu_dev *vpu =
		(struct mdp_vpu_dev *)(unsigned long)msg->drv_data;

	vpu->status = msg->status;
	complete(&vpu->ipi_acked);
}

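/*
 * Frame ack: the firmware echoes back the img_sw_addr it was sent, from
 * which the frame parameters and the issuing context are recovered; a
 * non-zero state is logged as a VPU failure.
 */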
static void mdp_vpu_ipi_handle_frame_ack(void *data, unsigned int len,
					 void *priv)
{
	struct img_sw_addr *addr = (struct img_sw_addr *)data;
	struct img_ipi_frameparam *param =
		(struct img_ipi_frameparam *)(unsigned long)addr->va;
	struct mdp_vpu_ctx *ctx =
		(struct mdp_vpu_ctx *)(unsigned long)param->drv_data;

	if (param->state) {
		struct mdp_dev *mdp = vpu_to_mdp(ctx->vpu_dev);

		dev_err(&mdp->pdev->dev, "VPU MDP failure:%d\n", param->state);
	}
	ctx->vpu_dev->status = param->state;
	complete(&ctx->vpu_dev->ipi_acked);
}

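/*
 * Register the MDP init/deinit/frame IPI handlers with the SCP, unwinding
 * the already-registered ones on failure.
 */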
int mdp_vpu_register(struct mdp_dev *mdp)
{
	int err;
	struct mtk_scp *scp = mdp->scp;
	struct device *dev = &mdp->pdev->dev;

	err = scp_ipi_register(scp, SCP_IPI_MDP_INIT,
			       mdp_vpu_ipi_handle_init_ack, NULL);
	if (err) {
		dev_err(dev, "scp_ipi_register failed %d\n", err);
		goto err_ipi_init;
	}
	err = scp_ipi_register(scp, SCP_IPI_MDP_DEINIT,
			       mdp_vpu_ipi_handle_deinit_ack, NULL);
	if (err) {
		dev_err(dev, "scp_ipi_register failed %d\n", err);
		goto err_ipi_deinit;
	}
	err = scp_ipi_register(scp, SCP_IPI_MDP_FRAME,
			       mdp_vpu_ipi_handle_frame_ack, NULL);
	if (err) {
		dev_err(dev, "scp_ipi_register failed %d\n", err);
		goto err_ipi_frame;
	}
	return 0;

err_ipi_frame:
	scp_ipi_unregister(scp, SCP_IPI_MDP_DEINIT);
err_ipi_deinit:
	scp_ipi_unregister(scp, SCP_IPI_MDP_INIT);
err_ipi_init:

	return err;
}

void mdp_vpu_unregister(struct mdp_dev *mdp)
{
	scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_INIT);
	scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_DEINIT);
	scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_FRAME);
}

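/*
 * Send an IPI command to the SCP and wait up to MDP_VPU_MESSAGE_TIMEOUT ms
 * for the matching ack handler to complete ipi_acked. A timeout maps to
 * -ETIME, a non-zero firmware status to -EINVAL.
 */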
static int mdp_vpu_sendmsg(struct mdp_vpu_dev *vpu, enum scp_ipi_id id,
			   void *buf, unsigned int len)
{
	struct mdp_dev *mdp = vpu_to_mdp(vpu);
	unsigned int t = MDP_VPU_MESSAGE_TIMEOUT;
	int ret;

	if (!vpu->scp) {
		dev_dbg(&mdp->pdev->dev, "vpu scp is NULL");
		return -EINVAL;
	}
	ret = scp_ipi_send(vpu->scp, id, buf, len, 2000);

	if (ret) {
		dev_err(&mdp->pdev->dev, "scp_ipi_send failed %d\n", ret);
		return -EPERM;
	}
	ret = wait_for_completion_timeout(&vpu->ipi_acked,
					  msecs_to_jiffies(t));
	if (!ret)
		ret = -ETIME;
	else if (vpu->status)
		ret = -EINVAL;
	else
		ret = 0;
	return ret;
}

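/*
 * Bring up the VPU: a first SCP_IPI_MDP_INIT asks the firmware for the work
 * size it needs (stored by the init ack handler), then the shared buffer is
 * allocated and a config pool is carved out right after the work area.
 * A second init message hands the work buffer address and size to firmware.
 */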
int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
		     struct mutex *lock)
{
	struct mdp_ipi_init_msg msg = {
		.drv_data = (unsigned long)vpu,
	};
	size_t mem_size;
	phys_addr_t pool;
	const size_t pool_size = sizeof(struct mdp_config_pool);
	struct mdp_dev *mdp = vpu_to_mdp(vpu);
	int err;

	init_completion(&vpu->ipi_acked);
	vpu->scp = scp;
	vpu->lock = lock;
	vpu->work_size = 0;
	err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
	if (err)
		goto err_work_size;
	/* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */

	mem_size = vpu_alloc_size;
	err = mdp_vpu_shared_mem_alloc(vpu);
	if (err) {
		dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
		goto err_mem_alloc;
	}

	pool = ALIGN((uintptr_t)vpu->work + vpu->work_size, 8);
	if (pool + pool_size - (uintptr_t)vpu->work > mem_size) {
		dev_err(&mdp->pdev->dev,
			"VPU memory insufficient: %zx + %zx > %zx",
			vpu->work_size, pool_size, mem_size);
		err = -ENOMEM;
		goto err_mem_size;
	}

	dev_dbg(&mdp->pdev->dev,
		"VPU work:%pK pa:%pad sz:%zx pool:%pa sz:%zx (mem sz:%zx)",
		vpu->work, &vpu->work_addr, vpu->work_size,
		&pool, pool_size, mem_size);
	vpu->pool = (struct mdp_config_pool *)(uintptr_t)pool;
	msg.work_addr = vpu->work_addr;
	msg.work_size = vpu->work_size;
	err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
	if (err)
		goto err_work_size;

	memset(vpu->pool, 0, sizeof(*vpu->pool));
	return 0;

err_work_size:
	switch (vpu->status) {
	case -MDP_IPI_EBUSY:
		err = -EBUSY;
		break;
	case -MDP_IPI_ENOMEM:
		err = -ENOSPC;	/* -ENOMEM */
		break;
	}
	return err;
err_mem_size:
err_mem_alloc:
	return err;
}

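/* Send SCP_IPI_MDP_DEINIT so the firmware stops using the work buffer. */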
int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu)
{
	struct mdp_ipi_deinit_msg msg = {
		.drv_data = (unsigned long)vpu,
		.work_addr = vpu->work_addr,
	};

	return mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_DEINIT, &msg, sizeof(msg));
}

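/*
 * Reserve one img_config slot from the shared pool. The returned *addr is
 * the slot's device address within the work buffer, computed from its
 * offset relative to vpu->work.
 */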
static struct img_config *mdp_config_get(struct mdp_vpu_dev *vpu,
					 enum mdp_config_id id, uint32_t *addr)
{
	struct img_config *config;

	if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
		return ERR_PTR(-EINVAL);

	mutex_lock(vpu->lock);
	vpu->pool->cfg_count[id]++;
	config = &vpu->pool->configs[id];
	*addr = vpu->work_addr + ((uintptr_t)config - (uintptr_t)vpu->work);
	mutex_unlock(vpu->lock);

	return config;
}

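/* Drop a reference on a config slot; -EINVAL if the id or pointer does not match. */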
static int mdp_config_put(struct mdp_vpu_dev *vpu,
			  enum mdp_config_id id,
			  const struct img_config *config)
{
	int err = 0;

	if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
		return -EINVAL;
	if (vpu->lock)
		mutex_lock(vpu->lock);
	if (!vpu->pool->cfg_count[id] || config != &vpu->pool->configs[id])
		err = -EINVAL;
	else
		vpu->pool->cfg_count[id]--;
	if (vpu->lock)
		mutex_unlock(vpu->lock);
	return err;
}

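/* Bind a VPU context to the config slot identified by @id. */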
int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
		     enum mdp_config_id id)
{
	ctx->config = mdp_config_get(vpu, id, &ctx->inst_addr);
	if (IS_ERR(ctx->config)) {
		int err = PTR_ERR(ctx->config);

		ctx->config = NULL;
		return err;
	}
	ctx->config_id = id;
	ctx->vpu_dev = vpu;
	return 0;
}

int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx)
{
	int err = mdp_config_put(ctx->vpu_dev, ctx->config_id, ctx->config);

	ctx->config_id = 0;
	ctx->config = NULL;
	ctx->inst_addr = 0;
	return err;
}

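/*
 * Hand one frame to the firmware: copy the frame parameters into the shared
 * work buffer and send its address over SCP_IPI_MDP_FRAME, then block until
 * the frame ack arrives.
 */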
int mdp_vpu_process(struct mdp_vpu_ctx *ctx, struct img_ipi_frameparam *param)
{
	struct mdp_vpu_dev *vpu = ctx->vpu_dev;
	struct mdp_dev *mdp = vpu_to_mdp(vpu);
	struct img_sw_addr addr;

	if (!ctx->vpu_dev->work || !ctx->vpu_dev->work_addr) {
		if (mdp_vpu_shared_mem_alloc(vpu)) {
			dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
			return -ENOMEM;
		}
	}
	memset((void *)ctx->vpu_dev->work, 0, ctx->vpu_dev->work_size);
	memset(ctx->config, 0, sizeof(*ctx->config));
	param->config_data.va = (unsigned long)ctx->config;
	param->config_data.pa = ctx->inst_addr;
	param->drv_data = (unsigned long)ctx;

	memcpy((void *)ctx->vpu_dev->work, param, sizeof(*param));
	addr.pa = ctx->vpu_dev->work_addr;
	addr.va = (uintptr_t)ctx->vpu_dev->work;
	return mdp_vpu_sendmsg(ctx->vpu_dev, SCP_IPI_MDP_FRAME,
		&addr, sizeof(addr));
}