// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_dspp.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

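/*
 * An entry in a resource map holds the id of the encoder holding the
 * block, or 0 when the block is free. A block is reserved "by other"
 * when it is held by an encoder other than the one asking.
 */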
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 * @hw_res:    Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};

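/*
 * dpu_rm_destroy - free all hardware block wrappers created by dpu_rm_init()
 * @rm: DPU resource manager handle
 */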
int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
		struct dpu_hw_intf *hw;

		if (rm->intf_blks[i]) {
			hw = to_dpu_hw_intf(rm->intf_blks[i]);
			dpu_hw_intf_destroy(hw);
		}
	}

	return 0;
}

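/*
 * dpu_rm_init - interrogate the hardware catalog and create a tracking
 *	object for every mixer, pingpong, interface, CTL and DSPP block
 *	it describes
 * @rm: DPU resource manager handle to populate
 * @cat: hardware catalog for the target SoC
 * @mmio: mapped register space of the DPU
 *
 * On any failure, all blocks created so far are torn down again.
 */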
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->intf_blks[intf->id - INTF_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

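/* Driving more than one interface implies a split (e.g. dual-DSI) display */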
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * @Return: true if rm->mixer_blks[peer_idx] is a peer of
 *          rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the proposed use case requirements, incl. hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @global_state: resource state tracking which encoder holds each block
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *      if the lm, and all other hardwired blocks connected to the lm (pp),
 *      are available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *      mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *      mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *      datapath.
 * @Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

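/*
 * Greedy mixer allocation: walk mixer_blks[] looking for a free primary
 * mixer, then scan the remaining mixers for free peers of that primary
 * until topology.num_lm mixers (with their pingpongs and, when requested,
 * dspps) have been collected. Only when the full set is found are the
 * assignments written to the global state.
 */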
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

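/*
 * Pick one CTL block per interface in the topology. Because a split
 * display needs CTLs with the DPU_CTL_SPLIT_DISPLAY capability (and a
 * non-split display must avoid them), only CTLs whose capability
 * matches the request are considered.
 */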
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
	}

	return 0;
}

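/*
 * Reserve a single interface block, identified by its hardware id,
 * for the given encoder.
 */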
static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		uint32_t id)
{
	int idx = id - INTF_0;

	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
		DPU_ERROR("invalid intf id: %d\n", id);
		return -EINVAL;
	}

	if (!rm->intf_blks[idx]) {
		DPU_ERROR("couldn't find intf id %d\n", id);
		return -EINVAL;
	}

	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
		DPU_ERROR("intf id %d already reserved\n", id);
		return -ENAVAIL;
	}

	global_state->intf_to_enc_id[idx] = enc_id;
	return 0;
}

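/*
 * Walk the encoder's reported interface usage and reserve every
 * interface it drives.
 */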
static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
		if (ret)
			return ret;
	}

	return ret;
}

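/*
 * Reserve the complete datapath for one encoder: mixers (with their
 * hardwired pingpongs and, when requested, dspps), then CTLs, then
 * interfaces. The first stage that cannot be satisfied fails the
 * whole reservation.
 */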
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
				&reqs->hw_res);
	if (ret)
		return ret;

	return ret;
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

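/* Release every hardware block that @enc holds in the global state */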
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
}

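/*
 * dpu_rm_reserve - top-level entry point: gather the encoder's hardware
 *	requirements for the requested topology and reserve a matching
 *	datapath in the global state. A commit that needs no modeset keeps
 *	its existing reservation and returns immediately.
 */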
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

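/*
 * dpu_rm_get_assigned_resources - copy into @blks every hardware block of
 *	@type that the global state has assigned to @enc_id, and return the
 *	number of blocks written
 *
 * A typical caller looks roughly like this (sketch only; the array size
 * and variable names are illustrative, not taken from this file):
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm,
 *			global_state, drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */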
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_INTF:
		hw_blks = rm->intf_blks;
		hw_to_enc_id = global_state->intf_to_enc_id;
		max_blks = ARRAY_SIZE(rm->intf_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}