Lines Matching refs:ctl_mgr

75 struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)  in get_kms()  argument
77 struct msm_drm_private *priv = ctl_mgr->dev->dev_private; in get_kms()
253 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_set_cursor() local
259 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM", in mdp5_ctl_set_cursor()
265 DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration"); in mdp5_ctl_set_cursor()
334 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_reset_blend_regs() local
339 for (i = 0; i < ctl_mgr->nlm; i++) { in mdp5_ctl_reset_blend_regs()
475 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_sw_flush() local
478 (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) in fix_sw_flush()
490 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_for_single_flush() local
495 ctl_mgr->single_flush_pending_mask |= (*flush_mask); in fix_for_single_flush()
500 *flush_mask = ctl_mgr->single_flush_pending_mask; in fix_for_single_flush()
504 ctl_mgr->single_flush_pending_mask = 0; in fix_for_single_flush()
539 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_commit() local
553 flush_mask &= ctl_mgr->flush_hw_mask; in mdp5_ctl_commit()
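The fragments from fix_sw_flush() (line 478), fix_for_single_flush() (lines 495-504) and mdp5_ctl_commit() (line 553) outline how a requested flush mask is adjusted before it reaches the hardware: bits the CTL block cannot flush are noted for software handling, paired CTLs merge their bits into a shared pending mask, and the result is clamped to flush_hw_mask. The sketch below is a minimal plain-C model of that flow only; the sw_mask, paired and master fields, the master/slave split and the ctl_commit() wrapper are assumptions made for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdbool.h>

struct ctl_manager {
	uint32_t flush_hw_mask;               /* flush bits the CTL hardware can handle */
	uint32_t single_flush_pending_mask;   /* bits accumulated for a paired flush */
};

struct ctl {
	struct ctl_manager *ctlm;
	uint32_t sw_mask;                     /* assumed: bits that need a software fix-up */
	bool paired;                          /* assumed: CTL takes part in single flush */
	bool master;                          /* assumed: this CTL issues the paired flush */
};

/* Line 478: a bit set in flush_mask but absent from flush_hw_mask needs SW handling. */
static void fix_sw_flush(struct ctl *ctl, uint32_t flush_mask, uint32_t bit)
{
	struct ctl_manager *ctl_mgr = ctl->ctlm;

	if (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
		ctl->sw_mask |= bit;
}

/* Lines 495-504: paired CTLs accumulate bits; one of them flushes the union. */
static void fix_for_single_flush(struct ctl *ctl, uint32_t *flush_mask)
{
	struct ctl_manager *ctl_mgr = ctl->ctlm;

	if (!ctl->paired)
		return;

	ctl_mgr->single_flush_pending_mask |= *flush_mask;

	if (ctl->master) {
		*flush_mask = ctl_mgr->single_flush_pending_mask;
		ctl_mgr->single_flush_pending_mask = 0;
	} else {
		*flush_mask = 0;   /* the master CTL will flush these bits */
	}
}

/* Line 553: whatever survives the fix-ups is clamped to what HW can flush. */
static uint32_t ctl_commit(struct ctl *ctl, uint32_t flush_mask, uint32_t sw_fix_bit)
{
	fix_sw_flush(ctl, flush_mask, sw_fix_bit);
	fix_for_single_flush(ctl, &flush_mask);
	return flush_mask & ctl->ctlm->flush_hw_mask;
}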
595 struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; in mdp5_ctl_pair() local
596 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); in mdp5_ctl_pair()
599 if (!ctl_mgr->single_flush_supported) in mdp5_ctl_pair()
608 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n"); in mdp5_ctl_pair()
611 DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); in mdp5_ctl_pair()
632 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, in mdp5_ctlm_request() argument
641 spin_lock_irqsave(&ctl_mgr->pool_lock, flags); in mdp5_ctlm_request()
644 for (c = 0; c < ctl_mgr->nctl; c++) in mdp5_ctlm_request()
645 if ((ctl_mgr->ctls[c].status & checkm) == match) in mdp5_ctlm_request()
648 dev_warn(ctl_mgr->dev->dev, in mdp5_ctlm_request()
652 for (c = 0; c < ctl_mgr->nctl; c++) in mdp5_ctlm_request()
653 if ((ctl_mgr->ctls[c].status & checkm) == match) in mdp5_ctlm_request()
656 DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!"); in mdp5_ctlm_request()
660 ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_request()
666 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); in mdp5_ctlm_request()
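mdp5_ctlm_request() (lines 632-666) hands out a CTL from the manager's pool under pool_lock, retrying with a relaxed filter before giving up. Below is a minimal user-space model of that two-pass search. CTL_STAT_BUSY, the match value of zero, MAX_CTL, the busy marking and the pthread mutex are assumptions standing in for details the matched lines do not show.

#include <stdio.h>
#include <pthread.h>

#define CTL_STAT_BUSY   (1u << 0)   /* assumed flag: CTL currently handed out */
#define CTL_STAT_BOOKED (1u << 1)   /* reserved for single flush (lines 751-752) */
#define MAX_CTL 8                   /* assumed bound on the pool size */

struct ctl {
	int id;
	unsigned int status;
};

struct ctl_manager {
	pthread_mutex_t pool_lock;  /* user-space stand-in for the driver's spinlock */
	int nctl;
	struct ctl ctls[MAX_CTL];
};

/* Two-pass search: prefer fully free CTLs, fall back to booked-but-idle ones. */
static struct ctl *ctlm_request(struct ctl_manager *ctl_mgr)
{
	unsigned int checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	struct ctl *ctl = NULL;
	int c;

	pthread_mutex_lock(&ctl_mgr->pool_lock);

	/* First pass (line 644): neither busy nor booked. */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == 0)
			goto found;

	/* Second pass (line 652): accept a booked CTL if nothing else is left. */
	fprintf(stderr, "fall back to booked CTL\n");
	checkm = CTL_STAT_BUSY;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == 0)
			goto found;

	fprintf(stderr, "No more CTL available!\n");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];       /* line 660 */
	ctl->status |= CTL_STAT_BUSY;  /* assumed: mark the CTL as taken */

unlock:
	pthread_mutex_unlock(&ctl_mgr->pool_lock);
	return ctl;
}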
670 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) in mdp5_ctlm_hw_reset() argument
675 for (c = 0; c < ctl_mgr->nctl; c++) { in mdp5_ctlm_hw_reset()
676 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_hw_reset()
684 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) in mdp5_ctlm_destroy() argument
686 kfree(ctl_mgr); in mdp5_ctlm_destroy()
692 struct mdp5_ctl_manager *ctl_mgr; in mdp5_ctlm_init() local
700 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); in mdp5_ctlm_init()
701 if (!ctl_mgr) { in mdp5_ctlm_init()
715 ctl_mgr->dev = dev; in mdp5_ctlm_init()
716 ctl_mgr->nlm = hw_cfg->lm.count; in mdp5_ctlm_init()
717 ctl_mgr->nctl = ctl_cfg->count; in mdp5_ctlm_init()
718 ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask; in mdp5_ctlm_init()
719 spin_lock_init(&ctl_mgr->pool_lock); in mdp5_ctlm_init()
722 spin_lock_irqsave(&ctl_mgr->pool_lock, flags); in mdp5_ctlm_init()
723 for (c = 0; c < ctl_mgr->nctl; c++) { in mdp5_ctlm_init()
724 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_init()
729 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); in mdp5_ctlm_init()
732 ctl->ctlm = ctl_mgr; in mdp5_ctlm_init()
749 ctl_mgr->single_flush_supported = true; in mdp5_ctlm_init()
751 ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; in mdp5_ctlm_init()
752 ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; in mdp5_ctlm_init()
754 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); in mdp5_ctlm_init()
755 DBG("Pool of %d CTLs created.", ctl_mgr->nctl); in mdp5_ctlm_init()
757 return ctl_mgr; in mdp5_ctlm_init()
760 if (ctl_mgr) in mdp5_ctlm_init()
761 mdp5_ctlm_destroy(ctl_mgr); in mdp5_ctlm_init()
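
The mdp5_ctlm_init() fragments (lines 692-761) show the manager being allocated, filled from the hardware configuration, and, when single flush is supported, CTL0 and CTL1 being pre-booked for the pair. The condensed sketch below follows the same sequence; the plain-count parameters, MAX_CTL, the id assignment, and the omitted spinlock and error reporting are simplifications not shown by the matched lines.

#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_CTL 8                   /* assumed bound on the pool size */
#define CTL_STAT_BOOKED (1u << 1)   /* reserved for the single-flush pair */

struct ctl_manager;

struct ctl {
	int id;
	unsigned int status;
	struct ctl_manager *ctlm;   /* back-pointer set at init time (line 732) */
};

struct ctl_manager {
	int nlm;                    /* layer mixer count (line 716) */
	int nctl;                   /* CTL count (line 717) */
	uint32_t flush_hw_mask;     /* per-SoC flush capabilities (line 718) */
	bool single_flush_supported;
	struct ctl ctls[MAX_CTL];
};

static struct ctl_manager *ctlm_init(int nlm, int nctl, uint32_t flush_hw_mask,
				     bool single_flush)
{
	struct ctl_manager *ctl_mgr;
	int c;

	ctl_mgr = calloc(1, sizeof(*ctl_mgr));   /* kzalloc() in the driver (line 700) */
	if (!ctl_mgr || nctl > MAX_CTL)
		goto fail;

	ctl_mgr->nlm = nlm;
	ctl_mgr->nctl = nctl;
	ctl_mgr->flush_hw_mask = flush_hw_mask;

	/* Wire every CTL in the pool back to its manager (lines 723-732). */
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct ctl *ctl = &ctl_mgr->ctls[c];

		ctl->id = c;          /* assumed: CTLs are indexed in pool order */
		ctl->ctlm = ctl_mgr;
	}

	/* Single flush uses CTL0/CTL1, so keep them booked (lines 749-752). */
	if (single_flush && ctl_mgr->nctl >= 2) {
		ctl_mgr->single_flush_supported = true;
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}

	return ctl_mgr;

fail:
	free(ctl_mgr);   /* mirrors the error path calling mdp5_ctlm_destroy() (line 761) */
	return NULL;
}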