
Lines matching references to: s — the struct parser_exec_state * threaded through the i915 GVT-g command parser (cmd_parser.c)

369 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
381 FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
504 #define gmadr_dw_number(s) \ argument
505 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
701 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index) in cmd_ptr() argument
703 return s->ip_va + (index << 2); in cmd_ptr()
706 static inline u32 cmd_val(struct parser_exec_state *s, int index) in cmd_val() argument
708 return *cmd_ptr(s, index); in cmd_val()
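
The two accessors above treat the command stream as an array of 32-bit dwords starting at the parser's instruction pointer; ip_va is a void * in the kernel source, so the GNU-C pointer arithmetic in cmd_ptr() advances by index << 2 bytes, i.e. by whole dwords. A minimal userspace sketch of the same idiom, with a hypothetical stand-in for the exec state (the byte cast is made explicit here):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the parser state; the real struct has many
 * more fields. */
struct exec_state {
	void *ip_va;	/* host VA of the current command's first dword */
};

static inline uint32_t *cmd_ptr(struct exec_state *s, int index)
{
	/* index << 2 is a byte offset: one command dword is 4 bytes */
	return (uint32_t *)((char *)s->ip_va + (index << 2));
}

static inline uint32_t cmd_val(struct exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}

int main(void)
{
	uint32_t buf[4] = { 0x11000001, 0x00002244, 0xdeadbeef, 0 };
	struct exec_state s = { .ip_va = buf };

	printf("header=%08x operand=%08x\n", cmd_val(&s, 0), cmd_val(&s, 1));
	return 0;
}
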
711 static void parser_exec_state_dump(struct parser_exec_state *s) in parser_exec_state_dump() argument
718 s->vgpu->id, s->engine->name, in parser_exec_state_dump()
719 s->ring_start, s->ring_start + s->ring_size, in parser_exec_state_dump()
720 s->ring_head, s->ring_tail); in parser_exec_state_dump()
723 s->buf_type == RING_BUFFER_INSTRUCTION ? in parser_exec_state_dump()
725 s->buf_addr_type == GTT_BUFFER ? in parser_exec_state_dump()
726 "GTT" : "PPGTT", s->ip_gma); in parser_exec_state_dump()
728 if (s->ip_va == NULL) { in parser_exec_state_dump()
734 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), in parser_exec_state_dump()
735 cmd_val(s, 2), cmd_val(s, 3)); in parser_exec_state_dump()
737 print_opcode(cmd_val(s, 0), s->engine); in parser_exec_state_dump()
739 s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); in parser_exec_state_dump()
742 gvt_dbg_cmd("ip_va=%p: ", s->ip_va); in parser_exec_state_dump()
744 gvt_dbg_cmd("%08x ", cmd_val(s, i)); in parser_exec_state_dump()
747 s->ip_va += 8 * sizeof(u32); in parser_exec_state_dump()
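
The fallback dump path above first rounds ip_va down to its containing 4 KiB page — shifting right and back left by 12 clears the low 12 bits — then prints rows of eight dwords, stepping ip_va by 8 * sizeof(u32) per row. A sketch of that alignment idiom (masking with ~0xfff is equivalent):

#include <stdint.h>
#include <stdio.h>

/* Same effect as (((u64)p) >> 12) << 12: clear the low 12 bits to land
 * on the containing 4 KiB page boundary. */
static inline void *align_down_4k(void *p)
{
	return (void *)((uintptr_t)p & ~(uintptr_t)0xfff);
}

int main(void)
{
	int x;

	printf("%p -> %p\n", (void *)&x, align_down_4k(&x));
	return 0;
}
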
752 static inline void update_ip_va(struct parser_exec_state *s) in update_ip_va() argument
756 if (WARN_ON(s->ring_head == s->ring_tail)) in update_ip_va()
759 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in update_ip_va()
760 unsigned long ring_top = s->ring_start + s->ring_size; in update_ip_va()
762 if (s->ring_head > s->ring_tail) { in update_ip_va()
763 if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top) in update_ip_va()
764 len = (s->ip_gma - s->ring_head); in update_ip_va()
765 else if (s->ip_gma >= s->ring_start && in update_ip_va()
766 s->ip_gma <= s->ring_tail) in update_ip_va()
767 len = (ring_top - s->ring_head) + in update_ip_va()
768 (s->ip_gma - s->ring_start); in update_ip_va()
770 len = (s->ip_gma - s->ring_head); in update_ip_va()
772 s->ip_va = s->rb_va + len; in update_ip_va()
774 s->ip_va = s->ret_bb_va; in update_ip_va()
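
update_ip_va() converts the parser's guest graphics address (ip_gma) into a host pointer into the shadow ring copy (rb_va). The subtle part is the offset math: when ring_head > ring_tail the valid region wraps past the top of the ring, so an ip_gma in the low part of the ring must account for the head-to-top span first. A hedged, self-contained sketch of just that offset computation, under simplified assumptions:

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/* The shadow copy is laid out linearly starting at ring_head, so this
 * offset indexes straight into it (ip_va = rb_va + offset). */
static uint64_t ring_offset_from_head(uint64_t ring_start, uint64_t ring_size,
				      uint64_t ring_head, uint64_t ring_tail,
				      uint64_t ip_gma)
{
	uint64_t ring_top = ring_start + ring_size;

	if (ring_head > ring_tail) {		/* valid region wraps */
		if (ip_gma >= ring_head && ip_gma < ring_top)
			return ip_gma - ring_head;
		/* ip_gma is in the wrapped-around low part of the ring */
		assert(ip_gma >= ring_start && ip_gma <= ring_tail);
		return (ring_top - ring_head) + (ip_gma - ring_start);
	}
	return ip_gma - ring_head;		/* no wrap */
}

int main(void)
{
	/* ring at 0x1000, 0x100 bytes; head near the top, tail wrapped low */
	printf("offset=%llu\n", (unsigned long long)
	       ring_offset_from_head(0x1000, 0x100, 0x10f0, 0x1020, 0x1008));
	return 0;
}
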
778 static inline int ip_gma_set(struct parser_exec_state *s, in ip_gma_set() argument
783 s->ip_gma = ip_gma; in ip_gma_set()
784 update_ip_va(s); in ip_gma_set()
788 static inline int ip_gma_advance(struct parser_exec_state *s, in ip_gma_advance() argument
791 s->ip_gma += (dw_len << 2); in ip_gma_advance()
793 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in ip_gma_advance()
794 if (s->ip_gma >= s->ring_start + s->ring_size) in ip_gma_advance()
795 s->ip_gma -= s->ring_size; in ip_gma_advance()
796 update_ip_va(s); in ip_gma_advance()
798 s->ip_va += (dw_len << 2); in ip_gma_advance()
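
ip_gma_advance() moves the instruction pointer forward by a command's dword length; ring-buffer addresses wrap back past the top, while batch-buffer addresses advance linearly (only the ring path re-derives ip_va). A sketch of the ring-wrap arithmetic, assuming one command never spans more than one ring size:

#include <stdint.h>

/* Sketch of the ring path in ip_gma_advance(): move forward dw_len
 * dwords, wrapping back past the top of the ring if needed. */
static uint64_t gma_advance_ring(uint64_t ip_gma, int dw_len,
				 uint64_t ring_start, uint64_t ring_size)
{
	ip_gma += (uint64_t)dw_len << 2;	/* dwords to bytes */
	if (ip_gma >= ring_start + ring_size)
		ip_gma -= ring_size;		/* a single wrap suffices */
	return ip_gma;
}

int main(void)
{
	/* 0x10f8 + 4 dwords = 0x1108, which wraps to 0x1008 */
	return gma_advance_ring(0x10f8, 4, 0x1000, 0x100) == 0x1008 ? 0 : 1;
}
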
813 static inline int cmd_length(struct parser_exec_state *s) in cmd_length() argument
815 return get_cmd_length(s->info, cmd_val(s, 0)); in cmd_length()
819 #define patch_value(s, addr, val) do { \ argument
841 static int force_nonpriv_reg_handler(struct parser_exec_state *s, in force_nonpriv_reg_handler() argument
844 struct intel_gvt *gvt = s->vgpu->gvt; in force_nonpriv_reg_handler()
850 data = cmd_val(s, index + 1); in force_nonpriv_reg_handler()
857 ring_base = s->engine->mmio_base; in force_nonpriv_reg_handler()
864 patch_value(s, cmd_ptr(s, index), nopid); in force_nonpriv_reg_handler()
876 static int mocs_cmd_reg_handler(struct parser_exec_state *s, in mocs_cmd_reg_handler() argument
881 vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1); in mocs_cmd_reg_handler()
886 struct parser_exec_state *s) in is_cmd_update_pdps() argument
888 u32 base = s->workload->engine->mmio_base; in is_cmd_update_pdps()
892 static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, in cmd_pdp_mmio_update_handler() argument
895 struct intel_vgpu *vgpu = s->vgpu; in cmd_pdp_mmio_update_handler()
896 struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; in cmd_pdp_mmio_update_handler()
902 pdps[0] = (u64)cmd_val(s, 2) << 32; in cmd_pdp_mmio_update_handler()
903 pdps[0] |= cmd_val(s, 4); in cmd_pdp_mmio_update_handler()
912 &s->workload->lri_shadow_mm); in cmd_pdp_mmio_update_handler()
913 *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
914 *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
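
The PDP update handler reassembles the 64-bit guest PDP0 from the two 32-bit halves carried in the LRI payload (upper half at payload dword 2, lower at dword 4), looks up or creates the matching shadow PPGTT, then patches both halves in the shadow command so the hardware sees the host's shadow page-directory pointer. A hedged sketch of the reassemble-and-patch step (register offsets in the example are placeholders):

#include <stdint.h>
#include <stdio.h>

/* Reassemble the guest's 64-bit pdp from its two halves, then rewrite
 * both halves with the shadow pdp instead. */
static uint64_t patch_pdp0(uint32_t *cmd, uint64_t shadow_pdp)
{
	uint64_t guest_pdp = ((uint64_t)cmd[2] << 32) | cmd[4];

	cmd[2] = (uint32_t)(shadow_pdp >> 32);	/* upper_32_bits() */
	cmd[4] = (uint32_t)shadow_pdp;		/* lower_32_bits() */
	return guest_pdp;
}

int main(void)
{
	/* dwords 1 and 3 would hold the PDP0 register offsets; the values
	 * here are placeholders, not real MMIO offsets */
	uint32_t cmd[5] = { 0x11000003, 0x2274, 0x1, 0x2270, 0x23456000 };
	uint64_t guest = patch_pdp0(cmd, 0x987654000ULL);

	printf("guest=%016llx shadow udw=%08x ldw=%08x\n",
	       (unsigned long long)guest, cmd[2], cmd[4]);
	return 0;
}
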
926 static int cmd_reg_handler(struct parser_exec_state *s, in cmd_reg_handler() argument
929 struct intel_vgpu *vgpu = s->vgpu; in cmd_reg_handler()
951 mocs_cmd_reg_handler(s, offset, index)) in cmd_reg_handler()
955 force_nonpriv_reg_handler(s, offset, index, cmd)) in cmd_reg_handler()
961 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); in cmd_reg_handler()
964 if (is_cmd_update_pdps(offset, s) && in cmd_reg_handler()
965 cmd_pdp_mmio_update_handler(s, offset, index)) in cmd_reg_handler()
978 if (IS_GEN(s->engine->i915, 9) && in cmd_reg_handler()
981 intel_gvt_hypervisor_read_gpa(s->vgpu, in cmd_reg_handler()
982 s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); in cmd_reg_handler()
985 u32 data = cmd_val(s, index + 1); in cmd_reg_handler()
987 if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) in cmd_reg_handler()
998 #define cmd_reg(s, i) \ argument
999 (cmd_val(s, i) & GENMASK(22, 2))
1001 #define cmd_reg_inhibit(s, i) \ argument
1002 (cmd_val(s, i) & GENMASK(22, 18))
1004 #define cmd_gma(s, i) \ argument
1005 (cmd_val(s, i) & GENMASK(31, 2))
1007 #define cmd_gma_hi(s, i) \ argument
1008 (cmd_val(s, i) & GENMASK(15, 0))
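
The helper macros above pull fixed bit fields out of command dwords with GENMASK: an MMIO offset lives in bits 22:2 of an LRI/LRM payload dword, a low graphics address in bits 31:2, and the high 16 address bits in 15:0 of the following dword. A userspace rendering of the same masks, assuming a 32-bit GENMASK variant:

#include <stdint.h>
#include <stdio.h>

/* 32-bit rendering of the kernel's GENMASK(h, l): set bits h..l */
#define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* mirrors the macros above, minus the exec-state plumbing */
#define cmd_reg(dw)	((dw) & GENMASK32(22, 2))	/* MMIO offset    */
#define cmd_gma(dw)	((dw) & GENMASK32(31, 2))	/* gma, low bits  */
#define cmd_gma_hi(dw)	((dw) & GENMASK32(15, 0))	/* gma bits 47:32 */

int main(void)
{
	uint32_t dw = 0x0001c084;

	printf("reg=%05x gma=%08x\n", cmd_reg(dw), cmd_gma(dw));
	return 0;
}
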
1010 static int cmd_handler_lri(struct parser_exec_state *s) in cmd_handler_lri() argument
1013 int cmd_len = cmd_length(s); in cmd_handler_lri()
1016 if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { in cmd_handler_lri()
1017 if (s->engine->id == BCS0 && in cmd_handler_lri()
1018 cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) in cmd_handler_lri()
1021 ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; in cmd_handler_lri()
1025 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); in cmd_handler_lri()
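
cmd_handler_lri() walks the MI_LOAD_REGISTER_IMM payload as (offset, value) pairs — hence the i += 2 stride starting at dword 1 — auditing each register offset via cmd_reg_handler() before the write is allowed through. A minimal sketch of that pair-wise walk (the opcode encoding in the example is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* After the header dword, the payload is (offset, value) pairs: dword i
 * carries the register offset, dword i + 1 the value to be written. */
static void walk_lri(const uint32_t *cmd, int cmd_len)
{
	for (int i = 1; i < cmd_len; i += 2)
		printf("lri: reg %05x <- %08x\n",
		       cmd[i] & 0x7ffffc,	/* bits 22:2, as in cmd_reg() */
		       cmd[i + 1]);
}

int main(void)
{
	/* an LRI with two register writes (five dwords in total) */
	const uint32_t cmd[5] = { 0x11000003, 0x2244, 0x1, 0x2248, 0x2 };

	walk_lri(cmd, 5);
	return 0;
}
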
1032 static int cmd_handler_lrr(struct parser_exec_state *s) in cmd_handler_lrr() argument
1035 int cmd_len = cmd_length(s); in cmd_handler_lrr()
1038 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrr()
1039 ret |= ((cmd_reg_inhibit(s, i) || in cmd_handler_lrr()
1040 (cmd_reg_inhibit(s, i + 1)))) ? in cmd_handler_lrr()
1044 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src"); in cmd_handler_lrr()
1047 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst"); in cmd_handler_lrr()
1054 static inline int cmd_address_audit(struct parser_exec_state *s,
1057 static int cmd_handler_lrm(struct parser_exec_state *s) in cmd_handler_lrm() argument
1059 struct intel_gvt *gvt = s->vgpu->gvt; in cmd_handler_lrm()
1063 int cmd_len = cmd_length(s); in cmd_handler_lrm()
1066 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrm()
1067 ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0; in cmd_handler_lrm()
1070 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm"); in cmd_handler_lrm()
1073 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_lrm()
1074 gma = cmd_gma(s, i + 1); in cmd_handler_lrm()
1076 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_lrm()
1077 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_lrm()
1081 i += gmadr_dw_number(s) + 1; in cmd_handler_lrm()
1086 static int cmd_handler_srm(struct parser_exec_state *s) in cmd_handler_srm() argument
1088 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_srm()
1091 int cmd_len = cmd_length(s); in cmd_handler_srm()
1094 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm"); in cmd_handler_srm()
1097 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_srm()
1098 gma = cmd_gma(s, i + 1); in cmd_handler_srm()
1100 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_srm()
1101 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_srm()
1105 i += gmadr_dw_number(s) + 1; in cmd_handler_srm()
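
Both the LRM and SRM handlers assemble the target graphics address the same way: bits 31:2 come from the low dword and, on hardware with 8-byte command addresses (gmadr_bytes == 8), bits 47:32 come from the next dword; the result is then range-checked by cmd_address_audit(). A sketch of that assembly under those assumptions:

#include <stdint.h>
#include <stdio.h>

static uint64_t assemble_gma(uint32_t lo, uint32_t hi, int gmadr_bytes)
{
	uint64_t gma = lo & 0xfffffffcu;	/* cmd_gma(): bits 31:2 */

	if (gmadr_bytes == 8)
		gma |= (uint64_t)(hi & 0xffffu) << 32;	/* cmd_gma_hi() */
	return gma;
}

int main(void)
{
	printf("gma=%012llx\n",
	       (unsigned long long)assemble_gma(0x89abc004, 0x0123, 8));
	return 0;
}
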
1144 static int cmd_handler_pipe_control(struct parser_exec_state *s) in cmd_handler_pipe_control() argument
1146 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_pipe_control()
1153 post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; in cmd_handler_pipe_control()
1156 if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE) in cmd_handler_pipe_control()
1157 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1161 ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1163 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1166 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { in cmd_handler_pipe_control()
1167 gma = cmd_val(s, 2) & GENMASK(31, 3); in cmd_handler_pipe_control()
1169 gma |= (cmd_gma_hi(s, 3)) << 32; in cmd_handler_pipe_control()
1171 if (cmd_val(s, 1) & (1 << 21)) in cmd_handler_pipe_control()
1173 ret |= cmd_address_audit(s, gma, sizeof(u64), in cmd_handler_pipe_control()
1178 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_pipe_control()
1180 patch_value(s, cmd_ptr(s, 2), gma); in cmd_handler_pipe_control()
1181 val = cmd_val(s, 1) & (~(1 << 21)); in cmd_handler_pipe_control()
1182 patch_value(s, cmd_ptr(s, 1), val); in cmd_handler_pipe_control()
1191 if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) in cmd_handler_pipe_control()
1192 set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, in cmd_handler_pipe_control()
1193 s->workload->pending_events); in cmd_handler_pipe_control()
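
For a PIPE_CONTROL whose post-sync write targets the global GTT in index mode (bit 21 of dword 1), the handler audits the address and then patches the command so the write lands in this vGPU's shadow hardware status page (hws_pga), clearing the index flag because the patched address is absolute. A hedged sketch of the patch step (the header dword in the example is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Rebase an index-mode post-sync write onto the vGPU's shadow status
 * page and drop the GGTT-index flag, since the address is now absolute. */
static void patch_post_sync(uint32_t *cmd, uint64_t hws_pga)
{
	uint64_t gma = hws_pga + (cmd[2] & ~0x7u);	/* keep qword offset */

	cmd[2] = (uint32_t)gma;		/* patch_value(cmd_ptr(s, 2), gma) */
	cmd[1] &= ~(1u << 21);		/* patch_value(cmd_ptr(s, 1), val) */
}

int main(void)
{
	/* dword 0 is an illustrative PIPE_CONTROL-style header */
	uint32_t cmd[3] = { 0x7a000004, (1u << 21) | 0x4000, 0x30 };

	patch_post_sync(cmd, 0x200000);
	printf("dw1=%08x dw2=%08x\n", cmd[1], cmd[2]);
	return 0;
}
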
1197 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) in cmd_handler_mi_user_interrupt() argument
1199 set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, in cmd_handler_mi_user_interrupt()
1200 s->workload->pending_events); in cmd_handler_mi_user_interrupt()
1201 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_user_interrupt()
1205 static int cmd_advance_default(struct parser_exec_state *s) in cmd_advance_default() argument
1207 return ip_gma_advance(s, cmd_length(s)); in cmd_advance_default()
1210 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_end() argument
1214 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_end()
1215 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1216 ret = ip_gma_set(s, s->ret_ip_gma_bb); in cmd_handler_mi_batch_buffer_end()
1217 s->buf_addr_type = s->saved_buf_addr_type; in cmd_handler_mi_batch_buffer_end()
1219 s->buf_type = RING_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1220 s->buf_addr_type = GTT_BUFFER; in cmd_handler_mi_batch_buffer_end()
1221 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size) in cmd_handler_mi_batch_buffer_end()
1222 s->ret_ip_gma_ring -= s->ring_size; in cmd_handler_mi_batch_buffer_end()
1223 ret = ip_gma_set(s, s->ret_ip_gma_ring); in cmd_handler_mi_batch_buffer_end()
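
MI_BATCH_BUFFER_END acts like a return instruction: the handler pops back either from a second-level batch to the first-level one (restoring the saved address type) or from a batch back to the ring, re-wrapping the saved ring address if it had run past the top. A sketch of that two-deep unwind, with hypothetical stand-in types:

#include <stdint.h>

enum buf_type { RING, BB_1ST, BB_2ND };

/* The saved return addresses act like a two-deep call stack. */
struct exec_state {
	enum buf_type buf_type;
	uint64_t ring_start, ring_size;
	uint64_t ret_ip_gma_ring, ret_ip_gma_bb;
	uint64_t ip_gma;
};

static void bb_end(struct exec_state *s)
{
	if (s->buf_type == BB_2ND) {
		s->buf_type = BB_1ST;
		s->ip_gma = s->ret_ip_gma_bb;
	} else {
		s->buf_type = RING;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		s->ip_gma = s->ret_ip_gma_ring;
	}
}

int main(void)
{
	struct exec_state s = {
		.buf_type = BB_2ND, .ring_start = 0x1000, .ring_size = 0x100,
		.ret_ip_gma_ring = 0x1120, .ret_ip_gma_bb = 0x5040,
	};

	bb_end(&s);	/* pops to the first-level batch */
	bb_end(&s);	/* pops to the ring, wrapping 0x1120 -> 0x1020 */
	return 0;
}
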
1247 static int gen8_decode_mi_display_flip(struct parser_exec_state *s, in gen8_decode_mi_display_flip() argument
1250 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_decode_mi_display_flip()
1262 dword0 = cmd_val(s, 0); in gen8_decode_mi_display_flip()
1263 dword1 = cmd_val(s, 1); in gen8_decode_mi_display_flip()
1264 dword2 = cmd_val(s, 2); in gen8_decode_mi_display_flip()
1293 static int skl_decode_mi_display_flip(struct parser_exec_state *s, in skl_decode_mi_display_flip() argument
1296 struct drm_i915_private *dev_priv = s->engine->i915; in skl_decode_mi_display_flip()
1297 struct intel_vgpu *vgpu = s->vgpu; in skl_decode_mi_display_flip()
1298 u32 dword0 = cmd_val(s, 0); in skl_decode_mi_display_flip()
1299 u32 dword1 = cmd_val(s, 1); in skl_decode_mi_display_flip()
1300 u32 dword2 = cmd_val(s, 2); in skl_decode_mi_display_flip()
1352 static int gen8_check_mi_display_flip(struct parser_exec_state *s, in gen8_check_mi_display_flip() argument
1360 if (INTEL_GEN(s->engine->i915) >= 9) { in gen8_check_mi_display_flip()
1361 stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); in gen8_check_mi_display_flip()
1362 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & in gen8_check_mi_display_flip()
1365 stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) & in gen8_check_mi_display_flip()
1367 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; in gen8_check_mi_display_flip()
1380 struct parser_exec_state *s, in gen8_update_plane_mmio_from_mi_display_flip() argument
1383 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_update_plane_mmio_from_mi_display_flip()
1384 struct intel_vgpu *vgpu = s->vgpu; in gen8_update_plane_mmio_from_mi_display_flip()
1411 static int decode_mi_display_flip(struct parser_exec_state *s, in decode_mi_display_flip() argument
1414 if (IS_BROADWELL(s->engine->i915)) in decode_mi_display_flip()
1415 return gen8_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1416 if (INTEL_GEN(s->engine->i915) >= 9) in decode_mi_display_flip()
1417 return skl_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1422 static int check_mi_display_flip(struct parser_exec_state *s, in check_mi_display_flip() argument
1425 return gen8_check_mi_display_flip(s, info); in check_mi_display_flip()
1429 struct parser_exec_state *s, in update_plane_mmio_from_mi_display_flip() argument
1432 return gen8_update_plane_mmio_from_mi_display_flip(s, info); in update_plane_mmio_from_mi_display_flip()
1435 static int cmd_handler_mi_display_flip(struct parser_exec_state *s) in cmd_handler_mi_display_flip() argument
1438 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_display_flip()
1441 int len = cmd_length(s); in cmd_handler_mi_display_flip()
1447 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_display_flip()
1452 ret = decode_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1458 ret = check_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1464 ret = update_plane_mmio_from_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1471 patch_value(s, cmd_ptr(s, i), MI_NOOP); in cmd_handler_mi_display_flip()
1485 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s) in cmd_handler_mi_wait_for_event() argument
1487 u32 cmd = cmd_val(s, 0); in cmd_handler_mi_wait_for_event()
1492 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_wait_for_event()
1496 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index) in get_gma_bb_from_cmd() argument
1500 struct intel_vgpu *vgpu = s->vgpu; in get_gma_bb_from_cmd()
1508 gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK; in get_gma_bb_from_cmd()
1512 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK; in get_gma_bb_from_cmd()
1518 static inline int cmd_address_audit(struct parser_exec_state *s, in cmd_address_audit() argument
1521 struct intel_vgpu *vgpu = s->vgpu; in cmd_address_audit()
1528 s->info->name); in cmd_address_audit()
1546 s->info->name, guest_gma, op_size); in cmd_address_audit()
1549 for (i = 0; i < cmd_length(s); i++) { in cmd_address_audit()
1551 pr_err("\n%08x ", cmd_val(s, i)); in cmd_address_audit()
1553 pr_err("%08x ", cmd_val(s, i)); in cmd_address_audit()
1564 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) in cmd_handler_mi_store_data_imm() argument
1566 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_store_data_imm()
1567 int op_size = (cmd_length(s) - 3) * sizeof(u32); in cmd_handler_mi_store_data_imm()
1568 int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1574 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_store_data_imm()
1580 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_store_data_imm()
1585 gma = cmd_val(s, 2) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1588 gma_low = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1589 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_store_data_imm()
1591 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1593 ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false); in cmd_handler_mi_store_data_imm()
1597 static inline int unexpected_cmd(struct parser_exec_state *s) in unexpected_cmd() argument
1599 struct intel_vgpu *vgpu = s->vgpu; in unexpected_cmd()
1601 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); in unexpected_cmd()
1606 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s) in cmd_handler_mi_semaphore_wait() argument
1608 return unexpected_cmd(s); in cmd_handler_mi_semaphore_wait()
1611 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s) in cmd_handler_mi_report_perf_count() argument
1613 return unexpected_cmd(s); in cmd_handler_mi_report_perf_count()
1616 static int cmd_handler_mi_op_2e(struct parser_exec_state *s) in cmd_handler_mi_op_2e() argument
1618 return unexpected_cmd(s); in cmd_handler_mi_op_2e()
1621 static int cmd_handler_mi_op_2f(struct parser_exec_state *s) in cmd_handler_mi_op_2f() argument
1623 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_op_2f()
1624 int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * in cmd_handler_mi_op_2f()
1630 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_op_2f()
1634 if (cmd_val(s, 0) & BIT(18)) in cmd_handler_mi_op_2f()
1636 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_op_2f()
1641 gma = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_op_2f()
1643 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_op_2f()
1646 ret = cmd_address_audit(s, gma, op_size, false); in cmd_handler_mi_op_2f()
1650 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s) in cmd_handler_mi_store_data_index() argument
1652 return unexpected_cmd(s); in cmd_handler_mi_store_data_index()
1655 static int cmd_handler_mi_clflush(struct parser_exec_state *s) in cmd_handler_mi_clflush() argument
1657 return unexpected_cmd(s); in cmd_handler_mi_clflush()
1661 struct parser_exec_state *s) in cmd_handler_mi_conditional_batch_buffer_end() argument
1663 return unexpected_cmd(s); in cmd_handler_mi_conditional_batch_buffer_end()
1666 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s) in cmd_handler_mi_update_gtt() argument
1668 return unexpected_cmd(s); in cmd_handler_mi_update_gtt()
1671 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) in cmd_handler_mi_flush_dw() argument
1673 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_flush_dw()
1680 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1684 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1690 if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { in cmd_handler_mi_flush_dw()
1691 gma = cmd_val(s, 1) & GENMASK(31, 3); in cmd_handler_mi_flush_dw()
1693 gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32; in cmd_handler_mi_flush_dw()
1695 if (cmd_val(s, 0) & (1 << 21)) in cmd_handler_mi_flush_dw()
1697 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); in cmd_handler_mi_flush_dw()
1701 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_mi_flush_dw()
1703 patch_value(s, cmd_ptr(s, 1), gma); in cmd_handler_mi_flush_dw()
1704 val = cmd_val(s, 0) & (~(1 << 21)); in cmd_handler_mi_flush_dw()
1705 patch_value(s, cmd_ptr(s, 0), val); in cmd_handler_mi_flush_dw()
1709 if ((cmd_val(s, 0) & (1 << 8))) in cmd_handler_mi_flush_dw()
1710 set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, in cmd_handler_mi_flush_dw()
1711 s->workload->pending_events); in cmd_handler_mi_flush_dw()
1715 static void addr_type_update_snb(struct parser_exec_state *s) in addr_type_update_snb() argument
1717 if ((s->buf_type == RING_BUFFER_INSTRUCTION) && in addr_type_update_snb()
1718 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) { in addr_type_update_snb()
1719 s->buf_addr_type = PPGTT_BUFFER; in addr_type_update_snb()
1756 static int batch_buffer_needs_scan(struct parser_exec_state *s) in batch_buffer_needs_scan() argument
1759 if (cmd_val(s, 0) & BIT(8) && in batch_buffer_needs_scan()
1760 !(s->vgpu->scan_nonprivbb & s->engine->mask)) in batch_buffer_needs_scan()
1771 static int find_bb_size(struct parser_exec_state *s, in find_bb_size() argument
1779 struct intel_vgpu *vgpu = s->vgpu; in find_bb_size()
1781 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in find_bb_size()
1782 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in find_bb_size()
1788 gma = get_gma_bb_from_cmd(s, 1); in find_bb_size()
1792 cmd = cmd_val(s, 0); in find_bb_size()
1793 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1796 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1797 repr_addr_type(s->buf_addr_type), in find_bb_size()
1798 s->engine->name, s->workload); in find_bb_size()
1802 if (copy_gma_to_hva(s->vgpu, mm, in find_bb_size()
1805 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1808 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1809 repr_addr_type(s->buf_addr_type), in find_bb_size()
1810 s->engine->name, s->workload); in find_bb_size()
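
find_bb_size() sizes a guest batch buffer before shadowing it: it steps command by command — copying each header dword from guest memory and asking get_cmd_info() for its length — until it reaches the batch-buffer-end command, recording both the total size and the end command's offset for the later audit_bb_end() check. A simplified sketch over an in-memory buffer; the length decode here is a pretend stand-in for the real per-opcode tables:

#include <stdint.h>
#include <stdio.h>

#define MI_BB_END 0x05000000u	/* MI_BATCH_BUFFER_END, bits 28:23 */

static int find_bb_size(const uint32_t *bb, int max_dw,
			uint32_t *bb_size, uint32_t *bb_end_off)
{
	uint32_t off = 0;

	for (int i = 0; i < max_dw; ) {
		uint32_t hdr = bb[i];
		/* pretend decode: low 6 bits carry (length - 2), as many
		 * MI commands do; the real code asks get_cmd_info() */
		int len = (hdr & 0x3f) + 2;

		if ((hdr & 0xff800000u) == MI_BB_END) {
			*bb_end_off = off;
			*bb_size = off + 4;	/* include BB_END itself */
			return 0;
		}
		i += len;
		off += len * 4;
	}
	return -1;	/* ran past the buffer without finding BB_END */
}

int main(void)
{
	/* two 2-dword pretend commands followed by BB_END */
	const uint32_t bb[5] = { 0x10000000, 0, 0x10000000, 0, MI_BB_END };
	uint32_t size, end_off;

	if (!find_bb_size(bb, 5, &size, &end_off))
		printf("bb_size=%u end_off=%u\n", size, end_off);
	return 0;
}
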
1833 static int audit_bb_end(struct parser_exec_state *s, void *va) in audit_bb_end() argument
1835 struct intel_vgpu *vgpu = s->vgpu; in audit_bb_end()
1839 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in audit_bb_end()
1842 cmd, get_opcode(cmd, s->engine), in audit_bb_end()
1843 repr_addr_type(s->buf_addr_type), in audit_bb_end()
1844 s->engine->name, s->workload); in audit_bb_end()
1856 static int perform_bb_shadow(struct parser_exec_state *s) in perform_bb_shadow() argument
1858 struct intel_vgpu *vgpu = s->vgpu; in perform_bb_shadow()
1864 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in perform_bb_shadow()
1865 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in perform_bb_shadow()
1869 gma = get_gma_bb_from_cmd(s, 1); in perform_bb_shadow()
1873 ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); in perform_bb_shadow()
1881 bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; in perform_bb_shadow()
1897 bb->obj = i915_gem_object_create_shmem(s->engine->i915, in perform_bb_shadow()
1911 ret = copy_gma_to_hva(s->vgpu, mm, in perform_bb_shadow()
1920 ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); in perform_bb_shadow()
1926 list_add(&bb->list, &s->workload->shadow_bb); in perform_bb_shadow()
1928 bb->bb_start_cmd_va = s->ip_va; in perform_bb_shadow()
1930 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) in perform_bb_shadow()
1931 bb->bb_offset = s->ip_va - s->rb_va; in perform_bb_shadow()
1943 s->ip_va = bb->va + start_offset; in perform_bb_shadow()
1944 s->ip_gma = gma; in perform_bb_shadow()
1955 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_start() argument
1959 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_batch_buffer_start()
1961 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_start()
1966 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; in cmd_handler_mi_batch_buffer_start()
1967 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { in cmd_handler_mi_batch_buffer_start()
1972 s->saved_buf_addr_type = s->buf_addr_type; in cmd_handler_mi_batch_buffer_start()
1973 addr_type_update_snb(s); in cmd_handler_mi_batch_buffer_start()
1974 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in cmd_handler_mi_batch_buffer_start()
1975 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
1976 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_start()
1978 s->buf_type = BATCH_BUFFER_2ND_LEVEL; in cmd_handler_mi_batch_buffer_start()
1979 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
1980 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
1983 if (batch_buffer_needs_scan(s)) { in cmd_handler_mi_batch_buffer_start()
1984 ret = perform_bb_shadow(s); in cmd_handler_mi_batch_buffer_start()
1989 ret = cmd_handler_mi_batch_buffer_end(s); in cmd_handler_mi_batch_buffer_start()
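
MI_BATCH_BUFFER_START is the matching call instruction: a second-level start is only legal from inside a first-level batch (and a second-level batch may not nest further), and the return address — the dword right after the start command — is saved before jumping so BB_END can unwind. A sketch mirroring those nesting rules, with the same hypothetical stand-ins as the BB_END sketch above:

#include <stdint.h>

enum buf_type { RING, BB_1ST, BB_2ND };

struct exec_state {
	enum buf_type buf_type;
	uint64_t ip_gma;
	uint64_t ret_ip_gma_ring, ret_ip_gma_bb;
};

static int bb_start(struct exec_state *s, int second_level,
		    int cmd_len_dw, uint64_t target_gma)
{
	if (s->buf_type == BB_2ND)
		return -1;	/* a 2nd-level batch may not nest further */
	if (second_level && s->buf_type != BB_1ST)
		return -1;	/* 2nd-level start only valid inside a batch */

	if (s->buf_type == RING) {
		s->ret_ip_gma_ring = s->ip_gma + cmd_len_dw * 4;
		s->buf_type = BB_1ST;
	} else {
		s->ret_ip_gma_bb = s->ip_gma + cmd_len_dw * 4;
		s->buf_type = BB_2ND;
	}
	s->ip_gma = target_gma;
	return 0;
}

int main(void)
{
	struct exec_state s = { .buf_type = RING, .ip_gma = 0x1000 };

	bb_start(&s, 0, 3, 0x40000);	/* ring -> first-level batch */
	bb_start(&s, 1, 3, 0x50000);	/* batch -> second-level batch */
	return 0;
}
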
2685 static int cmd_parser_exec(struct parser_exec_state *s) in cmd_parser_exec() argument
2687 struct intel_vgpu *vgpu = s->vgpu; in cmd_parser_exec()
2692 cmd = cmd_val(s, 0); in cmd_parser_exec()
2698 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in cmd_parser_exec()
2702 cmd, get_opcode(cmd, s->engine), in cmd_parser_exec()
2703 repr_addr_type(s->buf_addr_type), in cmd_parser_exec()
2704 s->engine->name, s->workload); in cmd_parser_exec()
2708 s->info = info; in cmd_parser_exec()
2710 trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, in cmd_parser_exec()
2711 cmd_length(s), s->buf_type, s->buf_addr_type, in cmd_parser_exec()
2712 s->workload, info->name); in cmd_parser_exec()
2715 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_parser_exec()
2722 ret = info->handler(s); in cmd_parser_exec()
2730 ret = cmd_advance_default(s); in cmd_parser_exec()
2752 static int command_scan(struct parser_exec_state *s, in command_scan() argument
2759 struct intel_vgpu *vgpu = s->vgpu; in command_scan()
2765 while (s->ip_gma != gma_tail) { in command_scan()
2766 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in command_scan()
2767 if (!(s->ip_gma >= rb_start) || in command_scan()
2768 !(s->ip_gma < gma_bottom)) { in command_scan()
2771 s->ip_gma, rb_start, in command_scan()
2773 parser_exec_state_dump(s); in command_scan()
2776 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { in command_scan()
2779 s->ip_gma, rb_start, in command_scan()
2781 parser_exec_state_dump(s); in command_scan()
2785 ret = cmd_parser_exec(s); in command_scan()
2788 parser_exec_state_dump(s); in command_scan()
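
command_scan() is the outer loop: while the instruction pointer has not reached the tail it verifies that ip_gma stays inside the ring and inside the currently valid [head, tail) window (which may wrap), dumps the parser state and bails on any violation, then lets cmd_parser_exec() decode and advance. A hedged skeleton of those bounds checks (the advance here is a one-dword placeholder):

#include <stdint.h>

/* mirrors gma_out_of_range(): the valid window wraps when head > tail */
static int ip_out_of_range(uint64_t ip, uint64_t head, uint64_t tail)
{
	if (head <= tail)
		return ip < head || ip >= tail;
	return ip >= tail && ip < head;
}

static int command_scan(uint64_t *ip, uint64_t head, uint64_t tail,
			uint64_t rb_start, uint64_t rb_size)
{
	while (*ip != tail) {
		if (*ip < rb_start || *ip >= rb_start + rb_size)
			return -1;	/* left the ring entirely */
		if (ip_out_of_range(*ip, head, tail))
			return -1;	/* outside the valid window */
		/* cmd_parser_exec() would decode here and advance by the
		 * command's real length; one dword is a placeholder */
		*ip += 4;
	}
	return 0;
}

int main(void)
{
	uint64_t ip = 0x1008;

	return command_scan(&ip, 0x1008, 0x1020, 0x1000, 0x100);
}
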
2799 struct parser_exec_state s; in scan_workload() local
2810 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_workload()
2811 s.buf_addr_type = GTT_BUFFER; in scan_workload()
2812 s.vgpu = workload->vgpu; in scan_workload()
2813 s.engine = workload->engine; in scan_workload()
2814 s.ring_start = workload->rb_start; in scan_workload()
2815 s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); in scan_workload()
2816 s.ring_head = gma_head; in scan_workload()
2817 s.ring_tail = gma_tail; in scan_workload()
2818 s.rb_va = workload->shadow_ring_buffer_va; in scan_workload()
2819 s.workload = workload; in scan_workload()
2820 s.is_ctx_wa = false; in scan_workload()
2825 ret = ip_gma_set(&s, gma_head); in scan_workload()
2829 ret = command_scan(&s, workload->rb_head, workload->rb_tail, in scan_workload()
2840 struct parser_exec_state s; in scan_wa_ctx() local
2858 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_wa_ctx()
2859 s.buf_addr_type = GTT_BUFFER; in scan_wa_ctx()
2860 s.vgpu = workload->vgpu; in scan_wa_ctx()
2861 s.engine = workload->engine; in scan_wa_ctx()
2862 s.ring_start = wa_ctx->indirect_ctx.guest_gma; in scan_wa_ctx()
2863 s.ring_size = ring_size; in scan_wa_ctx()
2864 s.ring_head = gma_head; in scan_wa_ctx()
2865 s.ring_tail = gma_tail; in scan_wa_ctx()
2866 s.rb_va = wa_ctx->indirect_ctx.shadow_va; in scan_wa_ctx()
2867 s.workload = workload; in scan_wa_ctx()
2868 s.is_ctx_wa = true; in scan_wa_ctx()
2870 ret = ip_gma_set(&s, gma_head); in scan_wa_ctx()
2874 ret = command_scan(&s, 0, ring_tail, in scan_wa_ctx()
2883 struct intel_vgpu_submission *s = &vgpu->submission; in shadow_workload_ring_buffer() local
2898 if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { in shadow_workload_ring_buffer()
2902 p = krealloc(s->ring_scan_buffer[workload->engine->id], in shadow_workload_ring_buffer()
2908 s->ring_scan_buffer[workload->engine->id] = p; in shadow_workload_ring_buffer()
2909 s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; in shadow_workload_ring_buffer()
2912 shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; in shadow_workload_ring_buffer()
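
shadow_workload_ring_buffer() keeps a per-engine scan buffer that only grows: krealloc() is called when the new ring contents exceed the cached size, and the larger size is remembered so the buffer ratchets up instead of churning every submission. A userspace sketch of that grow-on-demand pattern:

#include <stdlib.h>

/* Only reallocate when the requirement exceeds the cached capacity, and
 * never shrink, so repeated submissions reuse the same allocation. */
static int ensure_scan_buffer(void **buf, size_t *cap, size_t need)
{
	if (need <= *cap)
		return 0;

	void *p = realloc(*buf, need);

	if (!p)
		return -1;	/* old buffer still valid on failure */
	*buf = p;
	*cap = need;
	return 0;
}

int main(void)
{
	void *buf = NULL;
	size_t cap = 0;

	ensure_scan_buffer(&buf, &cap, 4096);	/* grows */
	ensure_scan_buffer(&buf, &cap, 1024);	/* reuses, no shrink */
	free(buf);
	return 0;
}
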