Lines Matching refs:ring_id
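The hits below are cross-reference matches for the identifier ring_id, most likely from drivers/gpu/drm/i915/gvt/execlist.c in the Linux i915 GVT-g code (the function names match that file). The leading numbers are source line numbers, and the trailing "argument"/"local" annotations mark how the match site declares the identifier. After each group of hits, a small standalone C sketch models the pattern the hits show; every fake_* name and numeric value in the sketches is an illustrative stand-in, not driver API.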
42 #define execlist_ring_mmio(gvt, ring_id, offset) \ argument
43 (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
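The execlist_ring_mmio() macro resolves a per-engine register address: each engine carries its own MMIO base, and a register is addressed as that base plus a fixed offset. A minimal userspace model of the same arithmetic (struct layout and values are made up):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-engine state the driver keeps. */
struct fake_engine { uint32_t mmio_base; };

/* Same shape as execlist_ring_mmio(): engine base plus register offset. */
#define fake_ring_mmio(engines, ring_id, offset) \
	((engines)[ring_id].mmio_base + (offset))

int main(void)
{
	/* Hypothetical per-engine MMIO bases. */
	struct fake_engine engines[2] = { { 0x2000 }, { 0x12000 } };
	uint32_t el_status = 0x234;	/* stand-in for _EL_OFFSET_STATUS */

	printf("ring 0 status reg: 0x%x\n",
	       (unsigned)fake_ring_mmio(engines, 0, el_status));
	printf("ring 1 status reg: 0x%x\n",
	       (unsigned)fake_ring_mmio(engines, 1, el_status));
	return 0;
}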
59 static int ring_id_to_context_switch_event(int ring_id) in ring_id_to_context_switch_event() argument
61 if (WARN_ON(ring_id < RCS || in ring_id_to_context_switch_event()
62 ring_id >= ARRAY_SIZE(context_switch_events))) in ring_id_to_context_switch_event()
65 return context_switch_events[ring_id]; in ring_id_to_context_switch_event()
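ring_id_to_context_switch_event() is a bounds-checked table lookup: ring IDs outside the context_switch_events[] table are rejected, otherwise the per-ring event is returned. A runnable sketch of the same shape (the event values are placeholders, and a plain error return stands in for the driver's WARN_ON path):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { RCS = 0, VCS, BCS, VECS, VCS2 };

static const int context_switch_events[] = {
	[RCS]  = 100,	/* placeholder event IDs; the driver uses */
	[VCS]  = 101,	/* its intel_gvt event enumeration here   */
	[BCS]  = 102,
	[VECS] = 103,
	[VCS2] = 104,
};

static int ring_id_to_context_switch_event(int ring_id)
{
	/* Reject anything outside the table, as the WARN_ON above does. */
	if (ring_id < RCS || ring_id >= (int)ARRAY_SIZE(context_switch_events))
		return -1;

	return context_switch_events[ring_id];
}

int main(void)
{
	printf("VCS event: %d\n", ring_id_to_context_switch_event(VCS));
	printf("bogus ring: %d\n", ring_id_to_context_switch_event(42));
	return 0;
}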
99 int ring_id = execlist->ring_id; in emulate_execlist_status() local
101 ring_id, _EL_OFFSET_STATUS); in emulate_execlist_status()
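In emulate_execlist_status(), the computed status-register address indexes the vGPU's virtual register file rather than real hardware. A sketch with the register file modeled as a flat dword array keyed by byte offset (fake_vreg() is a stand-in for the driver's vgpu_vreg() accessor; the values are invented):

#include <stdint.h>
#include <stdio.h>

/* Model of a vGPU virtual register file: 4-byte registers, byte-addressed. */
static uint32_t vreg[0x1000];

#define fake_vreg(off) (vreg[(off) / 4])

int main(void)
{
	uint32_t status_reg = 0x234;	/* e.g. engine base + _EL_OFFSET_STATUS */

	/* A 64-bit execlist status register is accessed as two dwords. */
	fake_vreg(status_reg)     = 0x00000301;	/* made-up lower dword */
	fake_vreg(status_reg + 4) = 0xdeadbeef;	/* made-up upper dword */

	uint64_t status = ((uint64_t)fake_vreg(status_reg + 4) << 32) |
			  fake_vreg(status_reg);
	printf("status = 0x%016llx\n", (unsigned long long)status);
	return 0;
}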
134 int ring_id = execlist->ring_id; in emulate_csb_update() local
139 ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, in emulate_csb_update()
141 ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id, in emulate_csb_update()
170 ring_id_to_context_switch_event(execlist->ring_id)); in emulate_csb_update()
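emulate_csb_update() appends a context-status entry to the guest-visible context status buffer, advances the write pointer, and then raises the per-ring event obtained from ring_id_to_context_switch_event(). A sketch of the pointer arithmetic, assuming a six-entry buffer where 0x7 marks "untouched since reset" (entry layout simplified):

#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES 6	/* assumed six-entry context status buffer */

struct csb_entry { uint32_t ldw, udw; };

static struct csb_entry csb[CSB_ENTRIES];
static uint32_t write_ptr = 0x7;	/* assumed "empty after reset" value */

/* Append one status entry: 0x7 means nothing written yet, otherwise
 * the pointer wraps modulo the buffer size. */
static void csb_append(uint32_t ldw, uint32_t udw)
{
	if (write_ptr == 0x7)
		write_ptr = 0;
	else
		write_ptr = (write_ptr + 1) % CSB_ENTRIES;

	csb[write_ptr].ldw = ldw;
	csb[write_ptr].udw = udw;
	/* The driver would now fire the per-ring context switch event. */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		csb_append(0x100 + i, 0);
	printf("write_ptr after 8 appends: %u\n", (unsigned)write_ptr);
	return 0;
}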
251 int ring_id = execlist->ring_id; in get_next_execlist_slot() local
252 u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id, in get_next_execlist_slot()
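get_next_execlist_slot() reads the same status register to decide which of the two ELSP slots the next submission should land in, based on the write pointer the register reports. A toy version (the driver additionally checks a queue-full status bit, modeled here as a plain flag):

#include <stdio.h>

/* Two-slot execlist, matching the hardware's ELSP ping-pong model. */
struct fake_slot { int index; };

struct fake_execlist {
	struct fake_slot slot[2];
};

/* The status register's write pointer selects the slot to fill next;
 * a full queue aborts the submission in the driver. */
static struct fake_slot *next_slot(struct fake_execlist *el,
				   unsigned int write_pointer, int queue_full)
{
	if (queue_full)
		return NULL;
	return &el->slot[write_pointer & 1];
}

int main(void)
{
	struct fake_execlist el = { { { 0 }, { 1 } } };
	printf("next slot: %d\n", next_slot(&el, 1, 0)->index);
	return 0;
}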
402 int ring_id = workload->ring_id; in update_wa_ctx_2_shadow_ctx() local
405 shadow_ctx->engine[ring_id].state->obj; in update_wa_ctx_2_shadow_ctx()
456 int ring_id = workload->ring_id; in prepare_execlist_workload() local
469 return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); in prepare_execlist_workload()
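Both update_wa_ctx_2_shadow_ctx() and prepare_execlist_workload() use ring_id the same way: as an index into per-engine arrays, here the shadow context's per-engine state and the vGPU's per-ring execlist instance. The pattern, reduced to its essentials:

#include <stdio.h>

#define NUM_RINGS 5	/* RCS, VCS, BCS, VECS, VCS2 on the parts GVT-g targets */

struct fake_execlist { int ring_id; };

/* Each vGPU carries one execlist state per engine, indexed by ring_id,
 * which is how prepare_execlist_workload() finds the right instance. */
struct fake_vgpu {
	struct fake_execlist execlist[NUM_RINGS];
};

int main(void)
{
	struct fake_vgpu vgpu;

	for (int i = 0; i < NUM_RINGS; i++)
		vgpu.execlist[i].ring_id = i;

	int ring_id = 2;	/* e.g. a workload bound to the third ring */
	printf("execlist for ring %d has ring_id %d\n",
	       ring_id, vgpu.execlist[ring_id].ring_id);
	return 0;
}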
504 int ring_id = workload->ring_id; in complete_execlist_workload() local
505 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; in complete_execlist_workload()
507 struct list_head *next = workload_q_head(vgpu, ring_id)->next; in complete_execlist_workload()
517 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { in complete_execlist_workload()
531 clean_workloads(vgpu, ENGINE_MASK(ring_id)); in complete_execlist_workload()
535 if (!list_empty(workload_q_head(vgpu, ring_id))) { in complete_execlist_workload()
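complete_execlist_workload() skips the normal schedule-out path when the workload failed or its engine is being reset; the reset test is a per-engine bitmask check. In i915, ENGINE_MASK(id) is simply the bit for that engine, so the test behaves like this sketch:

#include <stdio.h>

#define FAKE_ENGINE_MASK(ring_id) (1u << (ring_id))

int main(void)
{
	/* Bitmask of engines currently in reset, as in vgpu->resetting_eng. */
	unsigned int resetting_eng = FAKE_ENGINE_MASK(0) | FAKE_ENGINE_MASK(3);

	for (int ring_id = 0; ring_id < 5; ring_id++)
		printf("ring %d resetting: %s\n", ring_id,
		       (resetting_eng & FAKE_ENGINE_MASK(ring_id)) ? "yes" : "no");
	return 0;
}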
618 static int submit_context(struct intel_vgpu *vgpu, int ring_id, in submit_context() argument
622 struct list_head *q = workload_q_head(vgpu, ring_id); in submit_context()
647 gvt_dbg_el("ring id %d cur workload == last\n", ring_id); in submit_context()
657 gvt_dbg_el("ring id %d begin a new workload\n", ring_id); in submit_context()
678 workload->ring_id = ring_id; in submit_context()
691 if (ring_id == RCS) { in submit_context()
708 workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords; in submit_context()
711 workload, ring_id, head, tail, start, ctl); in submit_context()
725 if (list_empty(workload_q_head(vgpu, ring_id))) { in submit_context()
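submit_context() either merges the new context into the current workload (the "cur workload == last" case) or allocates a fresh workload, stamps it with ring_id, and appends it to the per-ring queue returned by workload_q_head(). A stand-in for that per-ring FIFO (the driver uses kernel list_heads; this models only the queueing order):

#include <stdio.h>

/* Minimal stand-in for the per-ring workload queue that submit_context()
 * appends to: one list head per ring, consumed later by the scheduler. */
struct fake_workload {
	int ring_id;
	struct fake_workload *next;
};

#define NUM_RINGS 5

static struct fake_workload *queue_head[NUM_RINGS];

static void queue_workload(struct fake_workload *w)
{
	struct fake_workload **p = &queue_head[w->ring_id];

	while (*p)
		p = &(*p)->next;	/* append at the tail, FIFO order */
	w->next = NULL;
	*p = w;
}

int main(void)
{
	struct fake_workload a = { .ring_id = 0 }, b = { .ring_id = 0 };

	queue_workload(&a);
	queue_workload(&b);
	printf("ring 0 queue empty: %s\n", queue_head[0] ? "no" : "yes");
	return 0;
}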
737 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) in intel_vgpu_submit_execlist() argument
739 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; in intel_vgpu_submit_execlist()
764 ret = submit_context(vgpu, ring_id, &desc[i], i == 0); in intel_vgpu_submit_execlist()
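intel_vgpu_submit_execlist() walks the descriptors the guest wrote into ELSP and submits each valid one, passing i == 0 so that only the first descriptor drives schedule-in emulation. A sketch of that loop (fake_submit_context() is a hypothetical stand-in for submit_context(), and the descriptor contents are invented):

#include <stdio.h>

struct fake_desc { int valid; unsigned int lrca; };

/* The third argument mirrors the "i == 0" flag passed above. */
static int fake_submit_context(int ring_id, struct fake_desc *d,
			       int emulate_schedule_in)
{
	printf("ring %d: submit lrca 0x%x (schedule-in: %d)\n",
	       ring_id, d->lrca, emulate_schedule_in);
	return 0;
}

int main(void)
{
	/* Two ELSP descriptors as written by the guest; slot 1 unused. */
	struct fake_desc desc[2] = { { 1, 0x1000 }, { 0, 0 } };

	for (int i = 0; i < 2; i++) {
		if (!desc[i].valid)
			continue;
		if (fake_submit_context(0, &desc[i], i == 0))
			return 1;
	}
	return 0;
}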
779 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) in init_vgpu_execlist() argument
781 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; in init_vgpu_execlist()
788 execlist->ring_id = ring_id; in init_vgpu_execlist()
792 ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, in init_vgpu_execlist()
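init_vgpu_execlist() records the ring_id in the per-ring execlist state and resets the guest-visible context status pointer register, conventionally read pointer 0 and write pointer 0x7 (the assumed post-reset "buffer empty" value, matching the CSB sketch earlier). A bitfield sketch of that initialization (field widths are simplified, not the exact hardware layout):

#include <stdint.h>
#include <stdio.h>

/* Bitfield view of the context status pointer register, mirroring the
 * read/write pointer split the driver uses; widths are assumptions. */
union fake_ctx_status_ptr {
	uint32_t dw;
	struct {
		uint32_t write_ptr : 3;
		uint32_t read_ptr  : 3;
		uint32_t rsvd      : 26;
	};
};

int main(void)
{
	union fake_ctx_status_ptr p = { .dw = 0 };

	/* Leave the buffer "empty": read pointer 0, write pointer at the
	 * post-reset sentinel 0x7. */
	p.read_ptr = 0;
	p.write_ptr = 0x7;
	printf("ctx_status_ptr = 0x%08x\n", (unsigned)p.dw);
	return 0;
}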