
Lines matching references to the symbol s (struct parser_exec_state) in the Intel GVT-g command parser, drivers/gpu/drm/i915/gvt/cmd_parser.c. Line numbers below refer to that file.

377 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
389 FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
514 #define gmadr_dw_number(s) \ argument
515 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
711 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index) in cmd_ptr() argument
713 return s->ip_va + (index << 2); in cmd_ptr()
716 static inline u32 cmd_val(struct parser_exec_state *s, int index) in cmd_val() argument
718 return *cmd_ptr(s, index); in cmd_val()
721 static inline bool is_init_ctx(struct parser_exec_state *s) in is_init_ctx() argument
723 return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx); in is_init_ctx()
726 static void parser_exec_state_dump(struct parser_exec_state *s) in parser_exec_state_dump() argument
733 s->vgpu->id, s->engine->name, in parser_exec_state_dump()
734 s->ring_start, s->ring_start + s->ring_size, in parser_exec_state_dump()
735 s->ring_head, s->ring_tail); in parser_exec_state_dump()
738 s->buf_type == RING_BUFFER_INSTRUCTION ? in parser_exec_state_dump()
739 "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ? in parser_exec_state_dump()
741 s->buf_addr_type == GTT_BUFFER ? in parser_exec_state_dump()
742 "GTT" : "PPGTT", s->ip_gma); in parser_exec_state_dump()
744 if (s->ip_va == NULL) { in parser_exec_state_dump()
750 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), in parser_exec_state_dump()
751 cmd_val(s, 2), cmd_val(s, 3)); in parser_exec_state_dump()
753 print_opcode(cmd_val(s, 0), s->engine); in parser_exec_state_dump()
755 s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); in parser_exec_state_dump()
758 gvt_dbg_cmd("ip_va=%p: ", s->ip_va); in parser_exec_state_dump()
760 gvt_dbg_cmd("%08x ", cmd_val(s, i)); in parser_exec_state_dump()
763 s->ip_va += 8 * sizeof(u32); in parser_exec_state_dump()
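The dump code at line 755 rounds ip_va down to a 4 KiB page boundary before hex-dumping eight dwords per row: shifting right and then left by 12 clears the page-offset bits. An equivalent mask form, as a self-contained helper:

    #include <stdint.h>

    /* Round an address down to its 4 KiB page: the same result as
     * the (va >> 12) << 12 form used in parser_exec_state_dump(). */
    static inline uint64_t page_align_down(uint64_t va)
    {
        return va & ~0xfffULL;   /* clear the low 12 bits */
    }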
768 static inline void update_ip_va(struct parser_exec_state *s) in update_ip_va() argument
772 if (WARN_ON(s->ring_head == s->ring_tail)) in update_ip_va()
775 if (s->buf_type == RING_BUFFER_INSTRUCTION || in update_ip_va()
776 s->buf_type == RING_BUFFER_CTX) { in update_ip_va()
777 unsigned long ring_top = s->ring_start + s->ring_size; in update_ip_va()
779 if (s->ring_head > s->ring_tail) { in update_ip_va()
780 if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top) in update_ip_va()
781 len = (s->ip_gma - s->ring_head); in update_ip_va()
782 else if (s->ip_gma >= s->ring_start && in update_ip_va()
783 s->ip_gma <= s->ring_tail) in update_ip_va()
784 len = (ring_top - s->ring_head) + in update_ip_va()
785 (s->ip_gma - s->ring_start); in update_ip_va()
787 len = (s->ip_gma - s->ring_head); in update_ip_va()
789 s->ip_va = s->rb_va + len; in update_ip_va()
791 s->ip_va = s->ret_bb_va; in update_ip_va()
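update_ip_va() maps the guest graphics memory address (ip_gma) onto the linear shadow copy of the ring (rb_va): the offset is how far ip_gma sits into the valid [head, tail) region, which may wrap past the top of the ring. A self-contained sketch of just that offset computation, assuming ip_gma already lies inside the valid region (the real code has checked that):

    /* Byte offset of ip_gma into a shadow buffer that stores the
     * ring contents linearly, starting from ring_head. */
    static unsigned long ring_offset(unsigned long ring_start,
                                     unsigned long ring_size,
                                     unsigned long ring_head,
                                     unsigned long ring_tail,
                                     unsigned long ip_gma)
    {
        unsigned long ring_top = ring_start + ring_size;

        if (ring_head > ring_tail) {
            /* Valid region wraps: [head, top) then [start, tail]. */
            if (ip_gma >= ring_head && ip_gma < ring_top)
                return ip_gma - ring_head;
            /* ip_gma is in the wrapped part at the bottom of the ring. */
            return (ring_top - ring_head) + (ip_gma - ring_start);
        }
        /* No wrap: one contiguous region starting at head. */
        return ip_gma - ring_head;
    }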
795 static inline int ip_gma_set(struct parser_exec_state *s, in ip_gma_set() argument
800 s->ip_gma = ip_gma; in ip_gma_set()
801 update_ip_va(s); in ip_gma_set()
805 static inline int ip_gma_advance(struct parser_exec_state *s, in ip_gma_advance() argument
808 s->ip_gma += (dw_len << 2); in ip_gma_advance()
810 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in ip_gma_advance()
811 if (s->ip_gma >= s->ring_start + s->ring_size) in ip_gma_advance()
812 s->ip_gma -= s->ring_size; in ip_gma_advance()
813 update_ip_va(s); in ip_gma_advance()
815 s->ip_va += (dw_len << 2); in ip_gma_advance()
830 static inline int cmd_length(struct parser_exec_state *s) in cmd_length() argument
832 return get_cmd_length(s->info, cmd_val(s, 0)); in cmd_length()
836 #define patch_value(s, addr, val) do { \ argument
847 struct parser_exec_state *s) in is_cmd_update_pdps() argument
849 u32 base = s->workload->engine->mmio_base; in is_cmd_update_pdps()
853 static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, in cmd_pdp_mmio_update_handler() argument
856 struct intel_vgpu *vgpu = s->vgpu; in cmd_pdp_mmio_update_handler()
857 struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; in cmd_pdp_mmio_update_handler()
863 pdps[0] = (u64)cmd_val(s, 2) << 32; in cmd_pdp_mmio_update_handler()
864 pdps[0] |= cmd_val(s, 4); in cmd_pdp_mmio_update_handler()
873 &s->workload->lri_shadow_mm); in cmd_pdp_mmio_update_handler()
874 *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
875 *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
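When an LRI updates the PDP registers, the handler reads the guest's pdps[0] from payload dwords 2 (high half) and 4 (low half), finds or creates the matching shadow PPGTT, and patches those same dwords with the shadow PDP so the hardware walks shadow page tables instead of guest ones. A hedged sketch of the patch step (the helper name is hypothetical):

    #include <stdint.h>

    /* Hypothetical helper: overwrite the guest PDP in an LRI payload
     * with the shadow PDP, mirroring the dword-2/dword-4 split above. */
    static void patch_lri_pdp(uint32_t *cmd, uint64_t shadow_pdp)
    {
        cmd[2] = (uint32_t)(shadow_pdp >> 32);   /* upper_32_bits() */
        cmd[4] = (uint32_t)shadow_pdp;           /* lower_32_bits() */
    }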
887 static int cmd_reg_handler(struct parser_exec_state *s, in cmd_reg_handler() argument
890 struct intel_vgpu *vgpu = s->vgpu; in cmd_reg_handler()
901 if (is_init_ctx(s)) { in cmd_reg_handler()
950 vreg = &vgpu_vreg(s->vgpu, offset); in cmd_reg_handler()
952 if (is_cmd_update_pdps(offset, s) && in cmd_reg_handler()
953 cmd_pdp_mmio_update_handler(s, offset, index)) in cmd_reg_handler()
959 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); in cmd_reg_handler()
963 *vreg = cmd_val(s, index + 1); in cmd_reg_handler()
971 cmdval = cmd_val(s, index + 1); in cmd_reg_handler()
981 ret = mmio_info->write(s->vgpu, offset, in cmd_reg_handler()
993 patch_value(s, cmd_ptr(s, index+1), cmdval_new); in cmd_reg_handler()
1009 if (GRAPHICS_VER(s->engine->i915) == 9 && in cmd_reg_handler()
1012 intel_gvt_hypervisor_read_gpa(s->vgpu, in cmd_reg_handler()
1013 s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); in cmd_reg_handler()
1016 u32 data = cmd_val(s, index + 1); in cmd_reg_handler()
1018 if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) in cmd_reg_handler()
1029 #define cmd_reg(s, i) \ argument
1030 (cmd_val(s, i) & GENMASK(22, 2))
1032 #define cmd_reg_inhibit(s, i) \ argument
1033 (cmd_val(s, i) & GENMASK(22, 18))
1035 #define cmd_gma(s, i) \ argument
1036 (cmd_val(s, i) & GENMASK(31, 2))
1038 #define cmd_gma_hi(s, i) \ argument
1039 (cmd_val(s, i) & GENMASK(15, 0))
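The four field macros mask a command dword with GENMASK: register offsets live in bits 22:2, graphics addresses in bits 31:2, and the high address dword contributes bits 15:0. A userspace re-statement with a 32-bit GENMASK (the kernel's GENMASK is wider but has the same meaning here):

    #include <stdint.h>

    /* Bits l..h set, everything else clear (0 <= l <= h <= 31). */
    #define GENMASK32(h, l)  ((~0u >> (31 - (h))) & (~0u << (l)))

    #define CMD_REG(dw)         ((dw) & GENMASK32(22, 2))   /* MMIO offset     */
    #define CMD_REG_INHIBIT(dw) ((dw) & GENMASK32(22, 18))  /* inhibit field   */
    #define CMD_GMA(dw)         ((dw) & GENMASK32(31, 2))   /* gfx address     */
    #define CMD_GMA_HI(dw)      ((dw) & GENMASK32(15, 0))   /* addr bits 47:32 */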
1041 static int cmd_handler_lri(struct parser_exec_state *s) in cmd_handler_lri() argument
1044 int cmd_len = cmd_length(s); in cmd_handler_lri()
1047 if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { in cmd_handler_lri()
1048 if (s->engine->id == BCS0 && in cmd_handler_lri()
1049 cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) in cmd_handler_lri()
1052 ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; in cmd_handler_lri()
1056 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); in cmd_handler_lri()
1063 static int cmd_handler_lrr(struct parser_exec_state *s) in cmd_handler_lrr() argument
1066 int cmd_len = cmd_length(s); in cmd_handler_lrr()
1069 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrr()
1070 ret |= ((cmd_reg_inhibit(s, i) || in cmd_handler_lrr()
1071 (cmd_reg_inhibit(s, i + 1)))) ? in cmd_handler_lrr()
1075 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src"); in cmd_handler_lrr()
1078 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst"); in cmd_handler_lrr()
1085 static inline int cmd_address_audit(struct parser_exec_state *s,
1088 static int cmd_handler_lrm(struct parser_exec_state *s) in cmd_handler_lrm() argument
1090 struct intel_gvt *gvt = s->vgpu->gvt; in cmd_handler_lrm()
1094 int cmd_len = cmd_length(s); in cmd_handler_lrm()
1097 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrm()
1098 ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0; in cmd_handler_lrm()
1101 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm"); in cmd_handler_lrm()
1104 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_lrm()
1105 gma = cmd_gma(s, i + 1); in cmd_handler_lrm()
1107 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_lrm()
1108 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_lrm()
1112 i += gmadr_dw_number(s) + 1; in cmd_handler_lrm()
1117 static int cmd_handler_srm(struct parser_exec_state *s) in cmd_handler_srm() argument
1119 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_srm()
1122 int cmd_len = cmd_length(s); in cmd_handler_srm()
1125 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm"); in cmd_handler_srm()
1128 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_srm()
1129 gma = cmd_gma(s, i + 1); in cmd_handler_srm()
1131 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_srm()
1132 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_srm()
1136 i += gmadr_dw_number(s) + 1; in cmd_handler_srm()
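Both cmd_handler_lrm() and cmd_handler_srm() rebuild the full graphics memory address from two payload dwords when the device encodes 8 address bytes per command (gmadr_bytes == 8): the first dword carries bits 31:2, the next carries bits 47:32. A sketch of that assembly:

    #include <stdint.h>

    /* Assemble a guest graphics memory address from an LRM/SRM payload.
     * dw_lo carries bits 31:2; dw_hi carries bits 47:32 and is only
     * consumed when the command encodes 8 address bytes. */
    static uint64_t decode_gma(uint32_t dw_lo, uint32_t dw_hi, int gmadr_bytes)
    {
        uint64_t gma = dw_lo & 0xfffffffcu;            /* GENMASK(31, 2) */

        if (gmadr_bytes == 8)
            gma |= (uint64_t)(dw_hi & 0xffffu) << 32;  /* GENMASK(15, 0) */
        return gma;
    }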
1175 static int cmd_handler_pipe_control(struct parser_exec_state *s) in cmd_handler_pipe_control() argument
1177 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_pipe_control()
1184 post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; in cmd_handler_pipe_control()
1187 if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE) in cmd_handler_pipe_control()
1188 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1192 ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1194 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1197 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { in cmd_handler_pipe_control()
1198 gma = cmd_val(s, 2) & GENMASK(31, 3); in cmd_handler_pipe_control()
1200 gma |= (cmd_gma_hi(s, 3)) << 32; in cmd_handler_pipe_control()
1202 if (cmd_val(s, 1) & (1 << 21)) in cmd_handler_pipe_control()
1204 ret |= cmd_address_audit(s, gma, sizeof(u64), in cmd_handler_pipe_control()
1209 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_pipe_control()
1211 patch_value(s, cmd_ptr(s, 2), gma); in cmd_handler_pipe_control()
1212 val = cmd_val(s, 1) & (~(1 << 21)); in cmd_handler_pipe_control()
1213 patch_value(s, cmd_ptr(s, 1), val); in cmd_handler_pipe_control()
1222 if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) in cmd_handler_pipe_control()
1223 set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, in cmd_handler_pipe_control()
1224 s->workload->pending_events); in cmd_handler_pipe_control()
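When bit 21 of dword 1 marks the PIPE_CONTROL post-sync address as an offset into the hardware status page, the parser rewrites it as an absolute address inside this vGPU's own status page (hws_pga) and clears the bit, so the guest's write cannot land in the host HWSP. A simplified sketch (the real code audits the address first, and the address can span two dwords):

    #include <stdint.h>

    /* Sketch: convert an HWSP-relative PIPE_CONTROL post-sync write
     * into an absolute write into this vGPU's status page. */
    static void patch_hwsp_write(uint32_t *cmd, uint32_t vgpu_hws_pga)
    {
        if (cmd[1] & (1u << 21)) {
            cmd[2] = vgpu_hws_pga + cmd[2];  /* absolute address */
            cmd[1] &= ~(1u << 21);           /* clear index mode */
        }
    }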
1228 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) in cmd_handler_mi_user_interrupt() argument
1230 set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, in cmd_handler_mi_user_interrupt()
1231 s->workload->pending_events); in cmd_handler_mi_user_interrupt()
1232 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_user_interrupt()
1236 static int cmd_advance_default(struct parser_exec_state *s) in cmd_advance_default() argument
1238 return ip_gma_advance(s, cmd_length(s)); in cmd_advance_default()
1241 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_end() argument
1245 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_end()
1246 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1247 ret = ip_gma_set(s, s->ret_ip_gma_bb); in cmd_handler_mi_batch_buffer_end()
1248 s->buf_addr_type = s->saved_buf_addr_type; in cmd_handler_mi_batch_buffer_end()
1249 } else if (s->buf_type == RING_BUFFER_CTX) { in cmd_handler_mi_batch_buffer_end()
1250 ret = ip_gma_set(s, s->ring_tail); in cmd_handler_mi_batch_buffer_end()
1252 s->buf_type = RING_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1253 s->buf_addr_type = GTT_BUFFER; in cmd_handler_mi_batch_buffer_end()
1254 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size) in cmd_handler_mi_batch_buffer_end()
1255 s->ret_ip_gma_ring -= s->ring_size; in cmd_handler_mi_batch_buffer_end()
1256 ret = ip_gma_set(s, s->ret_ip_gma_ring); in cmd_handler_mi_batch_buffer_end()
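cmd_handler_mi_batch_buffer_end() is a small state machine over s->buf_type: a second-level batch returns to its first-level caller, a context-image scan jumps to the ring tail, and a first-level batch returns to the ring, wrapping the saved return address back into the ring if it fell past the top. The transitions, sketched with illustrative names:

    /* Buffer-type transitions on MI_BATCH_BUFFER_END. Returns the
     * guest address where parsing resumes. */
    enum buf_type { RING, RING_CTX, BB_1ST, BB_2ND };

    struct st {
        enum buf_type buf_type;
        unsigned long ring_start, ring_size, ring_tail;
        unsigned long ret_ip_gma_bb, ret_ip_gma_ring;
    };

    static unsigned long bb_end_target(struct st *s)
    {
        if (s->buf_type == BB_2ND) {        /* return to 1st-level batch */
            s->buf_type = BB_1ST;
            return s->ret_ip_gma_bb;
        }
        if (s->buf_type == RING_CTX)        /* context image: go to tail */
            return s->ring_tail;

        s->buf_type = RING;                 /* 1st-level batch: back to ring */
        if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
            s->ret_ip_gma_ring -= s->ring_size;   /* wrap into the ring */
        return s->ret_ip_gma_ring;
    }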
1280 static int gen8_decode_mi_display_flip(struct parser_exec_state *s, in gen8_decode_mi_display_flip() argument
1283 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_decode_mi_display_flip()
1295 dword0 = cmd_val(s, 0); in gen8_decode_mi_display_flip()
1296 dword1 = cmd_val(s, 1); in gen8_decode_mi_display_flip()
1297 dword2 = cmd_val(s, 2); in gen8_decode_mi_display_flip()
1326 static int skl_decode_mi_display_flip(struct parser_exec_state *s, in skl_decode_mi_display_flip() argument
1329 struct drm_i915_private *dev_priv = s->engine->i915; in skl_decode_mi_display_flip()
1330 struct intel_vgpu *vgpu = s->vgpu; in skl_decode_mi_display_flip()
1331 u32 dword0 = cmd_val(s, 0); in skl_decode_mi_display_flip()
1332 u32 dword1 = cmd_val(s, 1); in skl_decode_mi_display_flip()
1333 u32 dword2 = cmd_val(s, 2); in skl_decode_mi_display_flip()
1385 static int gen8_check_mi_display_flip(struct parser_exec_state *s, in gen8_check_mi_display_flip() argument
1393 if (GRAPHICS_VER(s->engine->i915) >= 9) { in gen8_check_mi_display_flip()
1394 stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); in gen8_check_mi_display_flip()
1395 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & in gen8_check_mi_display_flip()
1398 stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) & in gen8_check_mi_display_flip()
1400 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; in gen8_check_mi_display_flip()
1413 struct parser_exec_state *s, in gen8_update_plane_mmio_from_mi_display_flip() argument
1416 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_update_plane_mmio_from_mi_display_flip()
1417 struct intel_vgpu *vgpu = s->vgpu; in gen8_update_plane_mmio_from_mi_display_flip()
1444 static int decode_mi_display_flip(struct parser_exec_state *s, in decode_mi_display_flip() argument
1447 if (IS_BROADWELL(s->engine->i915)) in decode_mi_display_flip()
1448 return gen8_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1449 if (GRAPHICS_VER(s->engine->i915) >= 9) in decode_mi_display_flip()
1450 return skl_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1455 static int check_mi_display_flip(struct parser_exec_state *s, in check_mi_display_flip() argument
1458 return gen8_check_mi_display_flip(s, info); in check_mi_display_flip()
1462 struct parser_exec_state *s, in update_plane_mmio_from_mi_display_flip() argument
1465 return gen8_update_plane_mmio_from_mi_display_flip(s, info); in update_plane_mmio_from_mi_display_flip()
1468 static int cmd_handler_mi_display_flip(struct parser_exec_state *s) in cmd_handler_mi_display_flip() argument
1471 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_display_flip()
1474 int len = cmd_length(s); in cmd_handler_mi_display_flip()
1480 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_display_flip()
1485 ret = decode_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1491 ret = check_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1497 ret = update_plane_mmio_from_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1504 patch_value(s, cmd_ptr(s, i), MI_NOOP); in cmd_handler_mi_display_flip()
1518 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s) in cmd_handler_mi_wait_for_event() argument
1520 u32 cmd = cmd_val(s, 0); in cmd_handler_mi_wait_for_event()
1525 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_wait_for_event()
1529 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index) in get_gma_bb_from_cmd() argument
1533 struct intel_vgpu *vgpu = s->vgpu; in get_gma_bb_from_cmd()
1541 gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK; in get_gma_bb_from_cmd()
1545 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK; in get_gma_bb_from_cmd()
1551 static inline int cmd_address_audit(struct parser_exec_state *s, in cmd_address_audit() argument
1554 struct intel_vgpu *vgpu = s->vgpu; in cmd_address_audit()
1561 s->info->name); in cmd_address_audit()
1579 s->info->name, guest_gma, op_size); in cmd_address_audit()
1582 for (i = 0; i < cmd_length(s); i++) { in cmd_address_audit()
1584 pr_err("\n%08x ", cmd_val(s, i)); in cmd_address_audit()
1586 pr_err("%08x ", cmd_val(s, i)); in cmd_address_audit()
1597 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) in cmd_handler_mi_store_data_imm() argument
1599 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_store_data_imm()
1600 int op_size = (cmd_length(s) - 3) * sizeof(u32); in cmd_handler_mi_store_data_imm()
1601 int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1607 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_store_data_imm()
1613 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_store_data_imm()
1618 gma = cmd_val(s, 2) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1621 gma_low = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1622 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_store_data_imm()
1624 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1626 ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false); in cmd_handler_mi_store_data_imm()
1630 static inline int unexpected_cmd(struct parser_exec_state *s) in unexpected_cmd() argument
1632 struct intel_vgpu *vgpu = s->vgpu; in unexpected_cmd()
1634 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); in unexpected_cmd()
1639 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s) in cmd_handler_mi_semaphore_wait() argument
1641 return unexpected_cmd(s); in cmd_handler_mi_semaphore_wait()
1644 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s) in cmd_handler_mi_report_perf_count() argument
1646 return unexpected_cmd(s); in cmd_handler_mi_report_perf_count()
1649 static int cmd_handler_mi_op_2e(struct parser_exec_state *s) in cmd_handler_mi_op_2e() argument
1651 return unexpected_cmd(s); in cmd_handler_mi_op_2e()
1654 static int cmd_handler_mi_op_2f(struct parser_exec_state *s) in cmd_handler_mi_op_2f() argument
1656 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_op_2f()
1657 int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * in cmd_handler_mi_op_2f()
1663 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_op_2f()
1667 if (cmd_val(s, 0) & BIT(18)) in cmd_handler_mi_op_2f()
1669 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_op_2f()
1674 gma = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_op_2f()
1676 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_op_2f()
1679 ret = cmd_address_audit(s, gma, op_size, false); in cmd_handler_mi_op_2f()
1683 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s) in cmd_handler_mi_store_data_index() argument
1685 return unexpected_cmd(s); in cmd_handler_mi_store_data_index()
1688 static int cmd_handler_mi_clflush(struct parser_exec_state *s) in cmd_handler_mi_clflush() argument
1690 return unexpected_cmd(s); in cmd_handler_mi_clflush()
1694 struct parser_exec_state *s) in cmd_handler_mi_conditional_batch_buffer_end() argument
1696 return unexpected_cmd(s); in cmd_handler_mi_conditional_batch_buffer_end()
1699 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s) in cmd_handler_mi_update_gtt() argument
1701 return unexpected_cmd(s); in cmd_handler_mi_update_gtt()
1704 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) in cmd_handler_mi_flush_dw() argument
1706 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_flush_dw()
1713 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1717 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1723 if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { in cmd_handler_mi_flush_dw()
1724 gma = cmd_val(s, 1) & GENMASK(31, 3); in cmd_handler_mi_flush_dw()
1726 gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32; in cmd_handler_mi_flush_dw()
1728 if (cmd_val(s, 0) & (1 << 21)) in cmd_handler_mi_flush_dw()
1730 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); in cmd_handler_mi_flush_dw()
1734 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_mi_flush_dw()
1736 patch_value(s, cmd_ptr(s, 1), gma); in cmd_handler_mi_flush_dw()
1737 val = cmd_val(s, 0) & (~(1 << 21)); in cmd_handler_mi_flush_dw()
1738 patch_value(s, cmd_ptr(s, 0), val); in cmd_handler_mi_flush_dw()
1742 if ((cmd_val(s, 0) & (1 << 8))) in cmd_handler_mi_flush_dw()
1743 set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, in cmd_handler_mi_flush_dw()
1744 s->workload->pending_events); in cmd_handler_mi_flush_dw()
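For MI_FLUSH_DW a post-sync address only exists when the post-sync op field (dword 0, bits 15:14) is non-zero and the address-type bit (dword 1, bit 2) is set; only then does the handler audit the address and, in index mode (bit 21 of dword 0), patch it to the vGPU's status page just as PIPE_CONTROL does above. The gating predicate, sketched (the bit names in the comments follow the checks above, not a full bspec decode):

    #include <stdint.h>

    /* True when this MI_FLUSH_DW carries a post-sync write whose
     * destination must be audited and possibly patched. */
    static int flush_dw_has_postsync_write(uint32_t dw0, uint32_t dw1)
    {
        return ((dw0 >> 14) & 0x3) && (dw1 & (1u << 2));
    }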
1748 static void addr_type_update_snb(struct parser_exec_state *s) in addr_type_update_snb() argument
1750 if ((s->buf_type == RING_BUFFER_INSTRUCTION) && in addr_type_update_snb()
1751 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) { in addr_type_update_snb()
1752 s->buf_addr_type = PPGTT_BUFFER; in addr_type_update_snb()
1789 static int batch_buffer_needs_scan(struct parser_exec_state *s) in batch_buffer_needs_scan() argument
1792 if (cmd_val(s, 0) & BIT(8) && in batch_buffer_needs_scan()
1793 !(s->vgpu->scan_nonprivbb & s->engine->mask)) in batch_buffer_needs_scan()
1804 static int find_bb_size(struct parser_exec_state *s, in find_bb_size() argument
1812 struct intel_vgpu *vgpu = s->vgpu; in find_bb_size()
1814 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in find_bb_size()
1815 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in find_bb_size()
1821 gma = get_gma_bb_from_cmd(s, 1); in find_bb_size()
1825 cmd = cmd_val(s, 0); in find_bb_size()
1826 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1829 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1830 repr_addr_type(s->buf_addr_type), in find_bb_size()
1831 s->engine->name, s->workload); in find_bb_size()
1835 if (copy_gma_to_hva(s->vgpu, mm, in find_bb_size()
1838 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1841 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1842 repr_addr_type(s->buf_addr_type), in find_bb_size()
1843 s->engine->name, s->workload); in find_bb_size()
1866 static int audit_bb_end(struct parser_exec_state *s, void *va) in audit_bb_end() argument
1868 struct intel_vgpu *vgpu = s->vgpu; in audit_bb_end()
1872 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in audit_bb_end()
1875 cmd, get_opcode(cmd, s->engine), in audit_bb_end()
1876 repr_addr_type(s->buf_addr_type), in audit_bb_end()
1877 s->engine->name, s->workload); in audit_bb_end()
1889 static int perform_bb_shadow(struct parser_exec_state *s) in perform_bb_shadow() argument
1891 struct intel_vgpu *vgpu = s->vgpu; in perform_bb_shadow()
1897 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in perform_bb_shadow()
1898 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in perform_bb_shadow()
1902 gma = get_gma_bb_from_cmd(s, 1); in perform_bb_shadow()
1906 ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); in perform_bb_shadow()
1914 bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; in perform_bb_shadow()
1930 bb->obj = i915_gem_object_create_shmem(s->engine->i915, in perform_bb_shadow()
1944 ret = copy_gma_to_hva(s->vgpu, mm, in perform_bb_shadow()
1953 ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); in perform_bb_shadow()
1959 list_add(&bb->list, &s->workload->shadow_bb); in perform_bb_shadow()
1961 bb->bb_start_cmd_va = s->ip_va; in perform_bb_shadow()
1963 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) in perform_bb_shadow()
1964 bb->bb_offset = s->ip_va - s->rb_va; in perform_bb_shadow()
1976 s->ip_va = bb->va + start_offset; in perform_bb_shadow()
1977 s->ip_gma = gma; in perform_bb_shadow()
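After the guest batch has been copied into a shadow object and its terminating command audited, perform_bb_shadow() redirects the parser into the shadow: s->ip_va points at the shadow copy while s->ip_gma keeps tracking the guest address, so address audits still see guest addresses but command reads come from the stable shadow. Sketched with illustrative names:

    /* Redirect parsing into the shadow batch: reads go through the
     * shadow host VA, address bookkeeping stays in guest terms. */
    struct bb_state {
        void *ip_va;              /* where cmd_val() reads from */
        unsigned long ip_gma;     /* guest graphics address */
    };

    static void enter_shadow_bb(struct bb_state *s, void *shadow_va,
                                unsigned long bb_start_offset,
                                unsigned long guest_gma)
    {
        s->ip_va  = (char *)shadow_va + bb_start_offset;
        s->ip_gma = guest_gma;
    }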
1988 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_start() argument
1992 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_batch_buffer_start()
1994 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_start()
1999 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; in cmd_handler_mi_batch_buffer_start()
2000 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { in cmd_handler_mi_batch_buffer_start()
2005 s->saved_buf_addr_type = s->buf_addr_type; in cmd_handler_mi_batch_buffer_start()
2006 addr_type_update_snb(s); in cmd_handler_mi_batch_buffer_start()
2007 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in cmd_handler_mi_batch_buffer_start()
2008 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2009 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_start()
2011 s->buf_type = BATCH_BUFFER_2ND_LEVEL; in cmd_handler_mi_batch_buffer_start()
2012 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2013 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2016 if (batch_buffer_needs_scan(s)) { in cmd_handler_mi_batch_buffer_start()
2017 ret = perform_bb_shadow(s); in cmd_handler_mi_batch_buffer_start()
2022 ret = cmd_handler_mi_batch_buffer_end(s); in cmd_handler_mi_batch_buffer_start()
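MI_BATCH_BUFFER_START saves where parsing must resume once the batch ends: the guest address just past this command. From the ring it enters a first-level batch; from a first-level batch with the second-level bit set it enters a second-level one, and second-level starts are rejected everywhere else. If batch_buffer_needs_scan() says the batch can be skipped, the handler simply emulates a batch-buffer end, as line 2022 shows. The return-address bookkeeping, sketched:

    /* Illustrative sketch of the return-IP bookkeeping on
     * MI_BATCH_BUFFER_START; cmd_len_bytes = cmd_length * 4. */
    enum bb_type { FROM_RING, BB1, BB2 };

    struct bb_start_state {
        enum bb_type buf_type;
        unsigned long ip_gma, ret_ip_gma_ring, ret_ip_gma_bb;
    };

    static int enter_batch(struct bb_start_state *s, int second_level,
                           unsigned long cmd_len_bytes)
    {
        if (second_level && s->buf_type != BB1)
            return -1;                    /* 2nd level only from 1st level */

        if (s->buf_type == FROM_RING) {
            s->ret_ip_gma_ring = s->ip_gma + cmd_len_bytes;
            s->buf_type = BB1;
        } else if (second_level) {
            s->ret_ip_gma_bb = s->ip_gma + cmd_len_bytes;
            s->buf_type = BB2;
        }
        /* else: chained 1st-level batch, return IP unchanged */
        return 0;
    }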
2718 static int cmd_parser_exec(struct parser_exec_state *s) in cmd_parser_exec() argument
2720 struct intel_vgpu *vgpu = s->vgpu; in cmd_parser_exec()
2725 cmd = cmd_val(s, 0); in cmd_parser_exec()
2731 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in cmd_parser_exec()
2735 cmd, get_opcode(cmd, s->engine), in cmd_parser_exec()
2736 repr_addr_type(s->buf_addr_type), in cmd_parser_exec()
2737 s->engine->name, s->workload); in cmd_parser_exec()
2741 s->info = info; in cmd_parser_exec()
2743 trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, in cmd_parser_exec()
2744 cmd_length(s), s->buf_type, s->buf_addr_type, in cmd_parser_exec()
2745 s->workload, info->name); in cmd_parser_exec()
2748 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_parser_exec()
2755 ret = info->handler(s); in cmd_parser_exec()
2763 ret = cmd_advance_default(s); in cmd_parser_exec()
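cmd_parser_exec() is the body of the dispatch loop: decode the opcode into a cmd_info, validate the command length, run the per-command handler if one is registered, then advance the IP by the command length unless the handler moved it itself (the F_IP_ADVANCE_CUSTOM flag in the real code). A hedged outline, with illustrative types and a placeholder flag value:

    #define F_IP_ADVANCE_CUSTOM  (1u << 0)   /* handler moves the IP itself */

    struct cmd_info_sketch {
        const char *name;
        unsigned int flag;
        int (*handler)(void *state);
    };

    static int dispatch_one(void *state, const struct cmd_info_sketch *info,
                            int (*advance)(void *state))
    {
        int ret = 0;

        if (info->handler) {
            ret = info->handler(state);
            if (ret < 0)
                return ret;
        }
        if (!(info->flag & F_IP_ADVANCE_CUSTOM))
            ret = advance(state);        /* ip_gma += cmd_length * 4 */
        return ret;
    }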
2785 static int command_scan(struct parser_exec_state *s, in command_scan() argument
2792 struct intel_vgpu *vgpu = s->vgpu; in command_scan()
2798 while (s->ip_gma != gma_tail) { in command_scan()
2799 if (s->buf_type == RING_BUFFER_INSTRUCTION || in command_scan()
2800 s->buf_type == RING_BUFFER_CTX) { in command_scan()
2801 if (!(s->ip_gma >= rb_start) || in command_scan()
2802 !(s->ip_gma < gma_bottom)) { in command_scan()
2805 s->ip_gma, rb_start, in command_scan()
2807 parser_exec_state_dump(s); in command_scan()
2810 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { in command_scan()
2813 s->ip_gma, rb_start, in command_scan()
2815 parser_exec_state_dump(s); in command_scan()
2819 ret = cmd_parser_exec(s); in command_scan()
2822 parser_exec_state_dump(s); in command_scan()
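command_scan() walks commands from head to tail; for ring-type buffers it checks on every step that ip_gma stays inside [rb_start, gma_bottom) and inside the valid head-to-tail window, dumping the parser state on any violation. A sketch of the containment test (the wrapped case mirrors what gma_out_of_range() has to handle when the region crosses the top of the ring):

    /* True when ip_gma lies inside the buffer and inside the
     * (possibly wrapped) region being scanned. */
    static int ip_in_range(unsigned long ip_gma, unsigned long rb_start,
                           unsigned long gma_bottom,
                           unsigned long gma_head, unsigned long gma_tail)
    {
        if (ip_gma < rb_start || ip_gma >= gma_bottom)
            return 0;                    /* outside the buffer entirely */
        if (gma_head <= gma_tail)        /* contiguous region */
            return ip_gma >= gma_head && ip_gma <= gma_tail;
        /* wrapped region: [head, bottom) plus [start, tail] */
        return ip_gma >= gma_head || ip_gma <= gma_tail;
    }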
2833 struct parser_exec_state s; in scan_workload() local
2844 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_workload()
2845 s.buf_addr_type = GTT_BUFFER; in scan_workload()
2846 s.vgpu = workload->vgpu; in scan_workload()
2847 s.engine = workload->engine; in scan_workload()
2848 s.ring_start = workload->rb_start; in scan_workload()
2849 s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); in scan_workload()
2850 s.ring_head = gma_head; in scan_workload()
2851 s.ring_tail = gma_tail; in scan_workload()
2852 s.rb_va = workload->shadow_ring_buffer_va; in scan_workload()
2853 s.workload = workload; in scan_workload()
2854 s.is_ctx_wa = false; in scan_workload()
2859 ret = ip_gma_set(&s, gma_head); in scan_workload()
2863 ret = command_scan(&s, workload->rb_head, workload->rb_tail, in scan_workload()
2874 struct parser_exec_state s; in scan_wa_ctx() local
2892 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_wa_ctx()
2893 s.buf_addr_type = GTT_BUFFER; in scan_wa_ctx()
2894 s.vgpu = workload->vgpu; in scan_wa_ctx()
2895 s.engine = workload->engine; in scan_wa_ctx()
2896 s.ring_start = wa_ctx->indirect_ctx.guest_gma; in scan_wa_ctx()
2897 s.ring_size = ring_size; in scan_wa_ctx()
2898 s.ring_head = gma_head; in scan_wa_ctx()
2899 s.ring_tail = gma_tail; in scan_wa_ctx()
2900 s.rb_va = wa_ctx->indirect_ctx.shadow_va; in scan_wa_ctx()
2901 s.workload = workload; in scan_wa_ctx()
2902 s.is_ctx_wa = true; in scan_wa_ctx()
2904 ret = ip_gma_set(&s, gma_head); in scan_wa_ctx()
2908 ret = command_scan(&s, 0, ring_tail, in scan_wa_ctx()
2917 struct intel_vgpu_submission *s = &vgpu->submission; in shadow_workload_ring_buffer() local
2932 if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { in shadow_workload_ring_buffer()
2936 p = krealloc(s->ring_scan_buffer[workload->engine->id], in shadow_workload_ring_buffer()
2942 s->ring_scan_buffer[workload->engine->id] = p; in shadow_workload_ring_buffer()
2943 s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; in shadow_workload_ring_buffer()
2946 shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; in shadow_workload_ring_buffer()
3110 struct parser_exec_state s; in intel_gvt_update_reg_whitelist() local
3124 s.buf_type = RING_BUFFER_CTX; in intel_gvt_update_reg_whitelist()
3125 s.buf_addr_type = GTT_BUFFER; in intel_gvt_update_reg_whitelist()
3126 s.vgpu = vgpu; in intel_gvt_update_reg_whitelist()
3127 s.engine = engine; in intel_gvt_update_reg_whitelist()
3128 s.ring_start = 0; in intel_gvt_update_reg_whitelist()
3129 s.ring_size = engine->context_size - start; in intel_gvt_update_reg_whitelist()
3130 s.ring_head = 0; in intel_gvt_update_reg_whitelist()
3131 s.ring_tail = s.ring_size; in intel_gvt_update_reg_whitelist()
3132 s.rb_va = vaddr + start; in intel_gvt_update_reg_whitelist()
3133 s.workload = NULL; in intel_gvt_update_reg_whitelist()
3134 s.is_ctx_wa = false; in intel_gvt_update_reg_whitelist()
3135 s.is_init_ctx = true; in intel_gvt_update_reg_whitelist()
3138 ret = ip_gma_set(&s, RING_CTX_SIZE); in intel_gvt_update_reg_whitelist()
3140 ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); in intel_gvt_update_reg_whitelist()
3157 struct parser_exec_state s; in intel_gvt_scan_engine_context() local
3176 s.buf_type = RING_BUFFER_CTX; in intel_gvt_scan_engine_context()
3177 s.buf_addr_type = GTT_BUFFER; in intel_gvt_scan_engine_context()
3178 s.vgpu = workload->vgpu; in intel_gvt_scan_engine_context()
3179 s.engine = workload->engine; in intel_gvt_scan_engine_context()
3180 s.ring_start = gma_start; in intel_gvt_scan_engine_context()
3181 s.ring_size = ctx_size; in intel_gvt_scan_engine_context()
3182 s.ring_head = gma_start + gma_head; in intel_gvt_scan_engine_context()
3183 s.ring_tail = gma_start + gma_tail; in intel_gvt_scan_engine_context()
3184 s.rb_va = ce->lrc_reg_state; in intel_gvt_scan_engine_context()
3185 s.workload = workload; in intel_gvt_scan_engine_context()
3186 s.is_ctx_wa = false; in intel_gvt_scan_engine_context()
3187 s.is_init_ctx = false; in intel_gvt_scan_engine_context()
3192 ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); in intel_gvt_scan_engine_context()
3196 ret = command_scan(&s, gma_head, gma_tail, in intel_gvt_scan_engine_context()