// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_dspp.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

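/* A block at @idx is unavailable if it is reserved by an encoder other than @enc_id */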
static inline bool reserved_by_other(uint32_t *res_map, int idx,
		uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 * @hw_res: Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};

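/**
 * dpu_rm_destroy - free all HW block objects created by dpu_rm_init
 * @rm: dpu resource manager handle
 *
 * Return: 0 on success (currently cannot fail)
 */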
int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
		struct dpu_hw_intf *hw;

		if (rm->intf_blks[i]) {
			hw = to_dpu_hw_intf(rm->intf_blks[i]);
			dpu_hw_intf_destroy(hw);
		}
	}

	return 0;
}

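/**
 * dpu_rm_init - interrogate the HW catalog and create tracking objects
 *	for all HW blocks in the display hardware
 * @rm: dpu resource manager handle
 * @cat: pointer to the HW catalog of the target device
 * @mmio: mapped register I/O address of the MDP block
 *
 * Return: 0 on success, negative error code otherwise
 */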
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input param(s)\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
					rc);
			goto fail;
		}
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->intf_blks[intf->id - INTF_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

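/* A topology driving more than one interface needs split-display CTLs */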
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 * rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resource-to-encoder mapping state
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *      if lm, and all other hardwired blocks connected to the lm (pp) are
 *      available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *      mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *      mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *      datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

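/*
 * Scan the mixers for a free primary LM, then try to complete the set with
 * free peer LMs (and their attached pingpong/DSPP blocks) until the
 * topology's num_lm is satisfied.
 */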
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

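/*
 * Reserve one CTL per requested interface; a CTL only matches if its
 * split-display capability agrees with what the topology needs.
 */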
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

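/* Reserve the INTF block with hw id @id for @enc_id, if it exists and is free */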
static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		uint32_t id)
{
	int idx = id - INTF_0;

	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
		DPU_ERROR("invalid intf id: %d\n", id);
		return -EINVAL;
	}

	if (!rm->intf_blks[idx]) {
		DPU_ERROR("couldn't find intf id %d\n", id);
		return -EINVAL;
	}

	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
		DPU_ERROR("intf id %d already reserved\n", id);
		return -ENAVAIL;
	}

	global_state->intf_to_enc_id[idx] = enc_id;
	return 0;
}

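/* Reserve an INTF for each interface the encoder reports in its hw resources */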
static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
		if (ret)
			return ret;
	}

	return ret;
}

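/* Reserve LMs, CTLs and INTFs in one pass; any failure aborts the reservation */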
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
				&reqs->hw_res);
	if (ret)
		return ret;

	return ret;
}

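/* Bundle the encoder's HW resources and the requested topology into @reqs */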
static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

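/* Zero out every slot in @res_mapping that is currently held by @enc_id */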
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
		uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

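/**
 * dpu_rm_release - release the HW blocks reserved by an encoder
 * @global_state: resource-to-encoder mapping state
 * @enc: DRM encoder whose reservations are to be released
 */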
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}

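/**
 * dpu_rm_reserve - reserve HW blocks needed by the encoder for a modeset
 * @rm: dpu resource manager handle
 * @global_state: resource-to-encoder mapping state
 * @enc: DRM encoder requesting the reservation
 * @crtc_state: proposed atomic state for the CRTC driven by @enc
 * @topology: display topology to reserve resources for
 *
 * Return: 0 if the reservation succeeded (or no modeset was needed),
 * negative error code otherwise
 */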
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

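/**
 * dpu_rm_get_assigned_resources - get the HW blocks of a given type that are
 *	currently assigned to the given encoder
 * @rm: dpu resource manager handle
 * @global_state: resource-to-encoder mapping state
 * @enc_id: encoder id for which the blocks are looked up
 * @type: type of HW blocks to look up
 * @blks: output array, filled with the assigned blocks
 * @blks_size: capacity of @blks
 *
 * A caller-side sketch (the array size is illustrative, not mandated):
 *
 *	struct dpu_hw_blk *hw_lm[2];
 *	int num_lm = dpu_rm_get_assigned_resources(rm, global_state,
 *			enc_id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
 *
 * Return: number of blocks written to @blks
 */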
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_INTF:
		hw_blks = rm->intf_blks;
		hw_to_enc_id = global_state->intf_to_enc_id;
		max_blks = ARRAY_SIZE(rm->intf_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}