Lines Matching +full:native +full:- +full:mode
20 #include <misc/cxl-base.h>
32 spin_lock(&afu->afu_cntl_lock); in afu_control()
43 dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); in afu_control()
44 rc = -EBUSY; in afu_control()
48 if (!cxl_ops->link_ok(afu->adapter, afu)) { in afu_control()
49 afu->enabled = enabled; in afu_control()
50 rc = -EIO; in afu_control()
70 afu->enabled = enabled; in afu_control()
73 spin_unlock(&afu->afu_cntl_lock); in afu_control()
110 * Re-enable any masked interrupts when the AFU is not in native_afu_reset()
112 * in dedicated mode. in native_afu_reset()
114 if (afu->current_mode == 0) { in native_afu_reset()
125 if (!cxl_ops->link_ok(afu->adapter, afu)) { in native_afu_check_and_enable()
127 return -EIO; in native_afu_check_and_enable()
129 if (afu->enabled) in native_afu_check_and_enable()
153 if (!cxl_ops->link_ok(afu->adapter, afu)) { in cxl_psl_purge()
154 dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); in cxl_psl_purge()
155 rc = -EIO; in cxl_psl_purge()
171 dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n"); in cxl_psl_purge()
172 rc = -EBUSY; in cxl_psl_purge()
175 if (!cxl_ops->link_ok(afu->adapter, afu)) { in cxl_psl_purge()
176 rc = -EIO; in cxl_psl_purge()
186 …dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx… in cxl_psl_purge()
190 … dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", in cxl_psl_purge()
199 pr_devel("PSL purged in %lld ns\n", end - start); in cxl_psl_purge()
213 * Most of that junk is really just an overly-complicated way of saying in spa_max_procs()
217 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1 in spa_max_procs()
223 return ((spa_size / 8) - 96) / 17; in spa_max_procs()
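A sketch of where that constant comes from, based on the layout the surrounding comment describes: each of the n+4 process-element slots is 128 bytes and each of the n contexts needs an 8-byte entry in the trailing PSL queue area, plus a further 256 bytes of headroom at the end. Ignoring alignment padding, spa_size ≈ (n + 4) * 128 + n * 8 + 256 = 136n + 768; solving for n gives n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17, which is exactly the value returned.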
226 static int cxl_alloc_spa(struct cxl_afu *afu, int mode) in cxl_alloc_spa() argument
231 afu->native->spa_order = -1; in cxl_alloc_spa()
233 afu->native->spa_order++; in cxl_alloc_spa()
234 spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; in cxl_alloc_spa()
237 dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", in cxl_alloc_spa()
238 afu->native->spa_max_procs, afu->native->spa_size); in cxl_alloc_spa()
239 if (mode != CXL_MODE_DEDICATED) in cxl_alloc_spa()
240 afu->num_procs = afu->native->spa_max_procs; in cxl_alloc_spa()
244 afu->native->spa_size = spa_size; in cxl_alloc_spa()
245 afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); in cxl_alloc_spa()
246 } while (afu->native->spa_max_procs < afu->num_procs); in cxl_alloc_spa()
248 if (!(afu->native->spa = (struct cxl_process_element *) in cxl_alloc_spa()
249 __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { in cxl_alloc_spa()
251 return -ENOMEM; in cxl_alloc_spa()
253 pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", in cxl_alloc_spa()
254 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); in cxl_alloc_spa()
263 afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + in attach_spa()
264 ((afu->native->spa_max_procs + 3) * 128)); in attach_spa()
266 spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; in attach_spa()
267 spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; in attach_spa()
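A hedged worked example of that encoding, assuming CXL_PSL_SPAP_Size_Shift is 4 and CXL_PSL_SPAP_Size covers bits 11:4 (as defined in the driver's cxl.h): for a two-page SPA, spa_size = 8192, so (8192 >> (12 - 4)) - 1 = 0x1f, and the mask leaves 0x10. The size field therefore effectively holds "number of 4 KB pages minus one", while CXL_PSL_SPAP_Addr keeps only the page-aligned physical address of the SPA in the upper bits.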
270 afu->native->spa, afu->native->spa_max_procs, in attach_spa()
271 afu->native->sw_command_status, spap); in attach_spa()
282 if (afu->native->spa) { in cxl_release_spa()
283 free_pages((unsigned long) afu->native->spa, in cxl_release_spa()
284 afu->native->spa_order); in cxl_release_spa()
285 afu->native->spa = NULL; in cxl_release_spa()
298 pr_devel("CXL adapter - invalidation of all ERAT entries\n"); in cxl_invalidate_all_psl9()
308 dev_warn(&adapter->dev, in cxl_invalidate_all_psl9()
310 return -EBUSY; in cxl_invalidate_all_psl9()
312 if (!cxl_ops->link_ok(adapter, NULL)) in cxl_invalidate_all_psl9()
313 return -EIO; in cxl_invalidate_all_psl9()
330 dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); in cxl_invalidate_all_psl8()
331 return -EBUSY; in cxl_invalidate_all_psl8()
333 if (!cxl_ops->link_ok(adapter, NULL)) in cxl_invalidate_all_psl8()
334 return -EIO; in cxl_invalidate_all_psl8()
341 dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); in cxl_invalidate_all_psl8()
342 return -EBUSY; in cxl_invalidate_all_psl8()
344 if (!cxl_ops->link_ok(adapter, NULL)) in cxl_invalidate_all_psl8()
345 return -EIO; in cxl_invalidate_all_psl8()
361 if (adapter->native->no_data_cache) { in cxl_data_cache_flush()
374 dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n"); in cxl_data_cache_flush()
375 return -EBUSY; in cxl_data_cache_flush()
378 if (!cxl_ops->link_ok(adapter, NULL)) { in cxl_data_cache_flush()
379 dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n"); in cxl_data_cache_flush()
380 return -EIO; in cxl_data_cache_flush()
414 struct cxl *adapter = ctx->afu->adapter; in slb_invalid()
417 WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); in slb_invalid()
420 ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | in slb_invalid()
421 be32_to_cpu(ctx->elem->lpid)); in slb_invalid()
425 if (!cxl_ops->link_ok(adapter, NULL)) in slb_invalid()
443 WARN_ON(!ctx->afu->enabled); in do_process_element_cmd()
445 ctx->elem->software_state = cpu_to_be32(pe_state); in do_process_element_cmd()
447 *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); in do_process_element_cmd()
449 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); in do_process_element_cmd()
452 dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n"); in do_process_element_cmd()
453 rc = -EBUSY; in do_process_element_cmd()
456 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { in do_process_element_cmd()
457 dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); in do_process_element_cmd()
458 rc = -EIO; in do_process_element_cmd()
461 state = be64_to_cpup(ctx->afu->native->sw_command_status); in do_process_element_cmd()
464 rc = -1; in do_process_element_cmd()
468 (cmd | (cmd >> 16) | ctx->pe)) in do_process_element_cmd()
489 mutex_lock(&ctx->afu->native->spa_mutex); in add_process_element()
490 pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); in add_process_element()
492 ctx->pe_inserted = true; in add_process_element()
493 pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); in add_process_element()
494 mutex_unlock(&ctx->afu->native->spa_mutex); in add_process_element()
503 if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) in terminate_process_element()
506 mutex_lock(&ctx->afu->native->spa_mutex); in terminate_process_element()
507 pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); in terminate_process_element()
512 if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) in terminate_process_element()
515 ctx->elem->software_state = 0; /* Remove Valid bit */ in terminate_process_element()
516 pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); in terminate_process_element()
517 mutex_unlock(&ctx->afu->native->spa_mutex); in terminate_process_element()
525 mutex_lock(&ctx->afu->native->spa_mutex); in remove_process_element()
526 pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); in remove_process_element()
531 if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) in remove_process_element()
535 ctx->pe_inserted = false; in remove_process_element()
538 pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); in remove_process_element()
539 mutex_unlock(&ctx->afu->native->spa_mutex); in remove_process_element()
546 if (!ctx->afu->pp_size || ctx->master) { in cxl_assign_psn_space()
547 ctx->psn_phys = ctx->afu->psn_phys; in cxl_assign_psn_space()
548 ctx->psn_size = ctx->afu->adapter->ps_size; in cxl_assign_psn_space()
550 ctx->psn_phys = ctx->afu->psn_phys + in cxl_assign_psn_space()
551 (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); in cxl_assign_psn_space()
552 ctx->psn_size = ctx->afu->pp_size; in cxl_assign_psn_space()
560 dev_info(&afu->dev, "Activating AFU directed mode\n"); in activate_afu_directed()
562 afu->num_procs = afu->max_procs_virtualised; in activate_afu_directed()
563 if (afu->native->spa == NULL) { in activate_afu_directed()
565 return -ENOMEM; in activate_afu_directed()
574 afu->current_mode = CXL_MODE_DIRECTED; in activate_afu_directed()
633 return cxl_calculate_sr(ctx->master, ctx->kernel, false, in calculate_sr()
639 bool need_update = (ctx->status == STARTED); in update_ivtes_directed()
648 ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); in update_ivtes_directed()
649 ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); in update_ivtes_directed()
673 ctx->elem->ctxtime = 0; /* disable */ in process_element_entry_psl9()
674 ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); in process_element_entry_psl9()
675 ctx->elem->haurp = 0; /* disable */ in process_element_entry_psl9()
677 if (ctx->kernel) in process_element_entry_psl9()
680 if (ctx->mm == NULL) { in process_element_entry_psl9()
682 __func__, ctx->pe, pid_nr(ctx->pid)); in process_element_entry_psl9()
683 return -EINVAL; in process_element_entry_psl9()
685 pid = ctx->mm->context.id; in process_element_entry_psl9()
689 if (!(ctx->tidr) && (ctx->assign_tidr)) { in process_element_entry_psl9()
692 return -ENODEV; in process_element_entry_psl9()
693 ctx->tidr = current->thread.tidr; in process_element_entry_psl9()
694 pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr); in process_element_entry_psl9()
697 ctx->elem->common.tid = cpu_to_be32(ctx->tidr); in process_element_entry_psl9()
698 ctx->elem->common.pid = cpu_to_be32(pid); in process_element_entry_psl9()
700 ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); in process_element_entry_psl9()
702 ctx->elem->common.csrp = 0; /* disable */ in process_element_entry_psl9()
710 if (ctx->irqs.range[0] == 0) { in process_element_entry_psl9()
711 ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; in process_element_entry_psl9()
712 ctx->irqs.range[0] = 1; in process_element_entry_psl9()
715 ctx->elem->common.amr = cpu_to_be64(amr); in process_element_entry_psl9()
716 ctx->elem->common.wed = cpu_to_be64(wed); in process_element_entry_psl9()
733 result = cxl_ops->afu_check_and_enable(ctx->afu); in cxl_attach_afu_directed_psl9()
747 ctx->elem->ctxtime = 0; /* disable */ in cxl_attach_afu_directed_psl8()
748 ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); in cxl_attach_afu_directed_psl8()
749 ctx->elem->haurp = 0; /* disable */ in cxl_attach_afu_directed_psl8()
750 ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1)); in cxl_attach_afu_directed_psl8()
752 pid = current->pid; in cxl_attach_afu_directed_psl8()
753 if (ctx->kernel) in cxl_attach_afu_directed_psl8()
755 ctx->elem->common.tid = 0; in cxl_attach_afu_directed_psl8()
756 ctx->elem->common.pid = cpu_to_be32(pid); in cxl_attach_afu_directed_psl8()
758 ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); in cxl_attach_afu_directed_psl8()
760 ctx->elem->common.csrp = 0; /* disable */ in cxl_attach_afu_directed_psl8()
761 ctx->elem->common.u.psl8.aurp0 = 0; /* disable */ in cxl_attach_afu_directed_psl8()
762 ctx->elem->common.u.psl8.aurp1 = 0; /* disable */ in cxl_attach_afu_directed_psl8()
766 ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); in cxl_attach_afu_directed_psl8()
767 ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); in cxl_attach_afu_directed_psl8()
773 if (ctx->irqs.range[0] == 0) { in cxl_attach_afu_directed_psl8()
774 ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; in cxl_attach_afu_directed_psl8()
775 ctx->irqs.range[0] = 1; in cxl_attach_afu_directed_psl8()
780 ctx->elem->common.amr = cpu_to_be64(amr); in cxl_attach_afu_directed_psl8()
781 ctx->elem->common.wed = cpu_to_be64(wed); in cxl_attach_afu_directed_psl8()
784 if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) in cxl_attach_afu_directed_psl8()
792 dev_info(&afu->dev, "Deactivating AFU directed mode\n"); in deactivate_afu_directed()
794 afu->current_mode = 0; in deactivate_afu_directed()
795 afu->num_procs = 0; in deactivate_afu_directed()
802 * stopping an AFU in AFU directed mode is AFU specific, which is not in deactivate_afu_directed()
825 if (afu->adapter->native->sl_ops->needs_reset_before_disable) in deactivate_afu_directed()
826 cxl_ops->afu_reset(afu); in deactivate_afu_directed()
835 dev_info(&afu->dev, "Activating dedicated process mode\n"); in cxl_activate_dedicated_process_psl9()
838 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the in cxl_activate_dedicated_process_psl9()
844 afu->num_procs = 1; in cxl_activate_dedicated_process_psl9()
845 if (afu->native->spa == NULL) { in cxl_activate_dedicated_process_psl9()
847 return -ENOMEM; in cxl_activate_dedicated_process_psl9()
854 afu->current_mode = CXL_MODE_DEDICATED; in cxl_activate_dedicated_process_psl9()
861 dev_info(&afu->dev, "Activating dedicated process mode\n"); in cxl_activate_dedicated_process_psl8()
876 afu->current_mode = CXL_MODE_DEDICATED; in cxl_activate_dedicated_process_psl8()
877 afu->num_procs = 1; in cxl_activate_dedicated_process_psl8()
887 ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); in cxl_update_dedicated_ivtes_psl9()
888 ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); in cxl_update_dedicated_ivtes_psl9()
894 struct cxl_afu *afu = ctx->afu; in cxl_update_dedicated_ivtes_psl8()
897 (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | in cxl_update_dedicated_ivtes_psl8()
898 (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | in cxl_update_dedicated_ivtes_psl8()
899 (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | in cxl_update_dedicated_ivtes_psl8()
900 ((u64)ctx->irqs.offset[3] & 0xffff)); in cxl_update_dedicated_ivtes_psl8()
902 (((u64)ctx->irqs.range[0] & 0xffff) << 48) | in cxl_update_dedicated_ivtes_psl8()
903 (((u64)ctx->irqs.range[1] & 0xffff) << 32) | in cxl_update_dedicated_ivtes_psl8()
904 (((u64)ctx->irqs.range[2] & 0xffff) << 16) | in cxl_update_dedicated_ivtes_psl8()
905 ((u64)ctx->irqs.range[3] & 0xffff)); in cxl_update_dedicated_ivtes_psl8()
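Purely as an illustration (these offsets and ranges are made up): with ctx->irqs.offset = {0x100, 0x200, 0, 0} and ctx->irqs.range = {1, 4, 0, 0}, the four 16-bit fields pack to 0x0100020000000000 and 0x0001000400000000 respectively before being written to the per-slice IVTE offset and limit registers, with entry 0 in the top 16 bits down to entry 3 in the bottom 16 bits.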
910 struct cxl_afu *afu = ctx->afu; in cxl_attach_dedicated_process_psl9()
918 if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) in cxl_attach_dedicated_process_psl9()
919 afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); in cxl_attach_dedicated_process_psl9()
921 ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V); in cxl_attach_dedicated_process_psl9()
929 result = cxl_ops->afu_reset(afu); in cxl_attach_dedicated_process_psl9()
938 struct cxl_afu *afu = ctx->afu; in cxl_attach_dedicated_process_psl8()
942 pid = (u64)current->pid << 32; in cxl_attach_dedicated_process_psl8()
943 if (ctx->kernel) in cxl_attach_dedicated_process_psl8()
949 if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) in cxl_attach_dedicated_process_psl8()
954 if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) in cxl_attach_dedicated_process_psl8()
955 afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); in cxl_attach_dedicated_process_psl8()
962 if ((rc = cxl_ops->afu_reset(afu))) in cxl_attach_dedicated_process_psl8()
972 dev_info(&afu->dev, "Deactivating dedicated process mode\n"); in deactivate_dedicated_process()
974 afu->current_mode = 0; in deactivate_dedicated_process()
975 afu->num_procs = 0; in deactivate_dedicated_process()
982 static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) in native_afu_deactivate_mode() argument
984 if (mode == CXL_MODE_DIRECTED) in native_afu_deactivate_mode()
986 if (mode == CXL_MODE_DEDICATED) in native_afu_deactivate_mode()
991 static int native_afu_activate_mode(struct cxl_afu *afu, int mode) in native_afu_activate_mode() argument
993 if (!mode) in native_afu_activate_mode()
995 if (!(mode & afu->modes_supported)) in native_afu_activate_mode()
996 return -EINVAL; in native_afu_activate_mode()
998 if (!cxl_ops->link_ok(afu->adapter, afu)) { in native_afu_activate_mode()
1000 return -EIO; in native_afu_activate_mode()
1003 if (mode == CXL_MODE_DIRECTED) in native_afu_activate_mode()
1005 if ((mode == CXL_MODE_DEDICATED) && in native_afu_activate_mode()
1006 (afu->adapter->native->sl_ops->activate_dedicated_process)) in native_afu_activate_mode()
1007 return afu->adapter->native->sl_ops->activate_dedicated_process(afu); in native_afu_activate_mode()
1009 return -EINVAL; in native_afu_activate_mode()
1015 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { in native_attach_process()
1017 return -EIO; in native_attach_process()
1020 ctx->kernel = kernel; in native_attach_process()
1021 if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) && in native_attach_process()
1022 (ctx->afu->adapter->native->sl_ops->attach_afu_directed)) in native_attach_process()
1023 return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr); in native_attach_process()
1025 if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && in native_attach_process()
1026 (ctx->afu->adapter->native->sl_ops->attach_dedicated_process)) in native_attach_process()
1027 return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr); in native_attach_process()
1029 return -EINVAL; in native_attach_process()
1036 * stop the AFU in dedicated mode (we therefore do not make that in detach_process_native_dedicated()
1050 cxl_ops->afu_reset(ctx->afu); in detach_process_native_dedicated()
1051 cxl_afu_disable(ctx->afu); in detach_process_native_dedicated()
1052 cxl_psl_purge(ctx->afu); in detach_process_native_dedicated()
1058 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) in native_update_ivtes()
1060 if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && in native_update_ivtes()
1061 (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)) in native_update_ivtes()
1062 return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); in native_update_ivtes()
1063 WARN(1, "native_update_ivtes: Bad mode\n"); in native_update_ivtes()
1068 if (!ctx->pe_inserted) in detach_process_native_afu_directed()
1071 return -1; in detach_process_native_afu_directed()
1073 return -1; in detach_process_native_afu_directed()
1082 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) in native_detach_process()
1093 if (!cxl_ops->link_ok(afu->adapter, afu)) in native_get_irq_info()
1094 return -EIO; in native_get_irq_info()
1096 info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); in native_get_irq_info()
1097 info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); in native_get_irq_info()
1099 info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An); in native_get_irq_info()
1100 info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); in native_get_irq_info()
1101 info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); in native_get_irq_info()
1102 info->proc_handle = 0; in native_get_irq_info()
1111 fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1); in cxl_native_irq_dump_regs_psl9()
1113 dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); in cxl_native_irq_dump_regs_psl9()
1114 if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { in cxl_native_irq_dump_regs_psl9()
1115 serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); in cxl_native_irq_dump_regs_psl9()
1116 cxl_afu_decode_psl_serr(ctx->afu, serr); in cxl_native_irq_dump_regs_psl9()
1124 fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); in cxl_native_irq_dump_regs_psl8()
1125 fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); in cxl_native_irq_dump_regs_psl8()
1126 fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); in cxl_native_irq_dump_regs_psl8()
1127 afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); in cxl_native_irq_dump_regs_psl8()
1129 dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); in cxl_native_irq_dump_regs_psl8()
1130 dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); in cxl_native_irq_dump_regs_psl8()
1131 if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { in cxl_native_irq_dump_regs_psl8()
1132 serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); in cxl_native_irq_dump_regs_psl8()
1133 cxl_afu_decode_psl_serr(ctx->afu, serr); in cxl_native_irq_dump_regs_psl8()
1135 dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); in cxl_native_irq_dump_regs_psl8()
1136 dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); in cxl_native_irq_dump_regs_psl8()
1143 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); in native_handle_psl_slice_error()
1145 if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers) in native_handle_psl_slice_error()
1146 ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx); in native_handle_psl_slice_error()
1148 if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) { in native_handle_psl_slice_error()
1149 dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); in native_handle_psl_slice_error()
1150 ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter); in native_handle_psl_slice_error()
1153 return cxl_ops->ack_irq(ctx, 0, errstat); in native_handle_psl_slice_error()
1169 if (cxl_is_translation_fault(afu, irq_info->dsisr)) in cxl_fail_irq_psl()
1187 dev_warn(&afu->dev, in native_irq_multiplexed()
1192 /* Mask the pe-handle from register value */ in native_irq_multiplexed()
1196 if (afu->adapter->native->sl_ops->fail_irq) in native_irq_multiplexed()
1197 return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); in native_irq_multiplexed()
1202 ctx = idr_find(&afu->contexts_idr, ph); in native_irq_multiplexed()
1204 if (afu->adapter->native->sl_ops->handle_interrupt) in native_irq_multiplexed()
1205 ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info); in native_irq_multiplexed()
1212 " %016llx\n(Possible AFU HW issue - was a term/remove acked" in native_irq_multiplexed()
1215 if (afu->adapter->native->sl_ops->fail_irq) in native_irq_multiplexed()
1216 ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); in native_irq_multiplexed()
1230 while (timeout--) { in native_irq_wait()
1231 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; in native_irq_wait()
1232 if (ph != ctx->pe) in native_irq_wait()
1234 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); in native_irq_wait()
1248 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" in native_irq_wait()
1271 dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); in native_slice_irq_err()
1272 dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); in native_slice_irq_err()
1274 dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); in native_slice_irq_err()
1275 dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); in native_slice_irq_err()
1276 dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); in native_slice_irq_err()
1282 dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n"); in native_slice_irq_err()
1292 dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1); in cxl_native_err_irq_dump_regs_psl9()
1301 dev_crit(&adapter->dev, in cxl_native_err_irq_dump_regs_psl8()
1314 dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); in native_irq_err()
1316 if (adapter->native->sl_ops->debugfs_stop_trace) { in native_irq_err()
1317 dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); in native_irq_err()
1318 adapter->native->sl_ops->debugfs_stop_trace(adapter); in native_irq_err()
1321 if (adapter->native->sl_ops->err_irq_dump_registers) in native_irq_err()
1322 adapter->native->sl_ops->err_irq_dump_registers(adapter); in native_irq_err()
1331 adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", in cxl_native_register_psl_err_irq()
1332 dev_name(&adapter->dev)); in cxl_native_register_psl_err_irq()
1333 if (!adapter->irq_name) in cxl_native_register_psl_err_irq()
1334 return -ENOMEM; in cxl_native_register_psl_err_irq()
1337 &adapter->native->err_hwirq, in cxl_native_register_psl_err_irq()
1338 &adapter->native->err_virq, in cxl_native_register_psl_err_irq()
1339 adapter->irq_name))) { in cxl_native_register_psl_err_irq()
1340 kfree(adapter->irq_name); in cxl_native_register_psl_err_irq()
1341 adapter->irq_name = NULL; in cxl_native_register_psl_err_irq()
1345 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); in cxl_native_register_psl_err_irq()
1352 if (adapter->native->err_virq == 0 || in cxl_native_release_psl_err_irq()
1353 adapter->native->err_virq != in cxl_native_release_psl_err_irq()
1354 irq_find_mapping(NULL, adapter->native->err_hwirq)) in cxl_native_release_psl_err_irq()
1358 cxl_unmap_irq(adapter->native->err_virq, adapter); in cxl_native_release_psl_err_irq()
1359 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); in cxl_native_release_psl_err_irq()
1360 kfree(adapter->irq_name); in cxl_native_release_psl_err_irq()
1361 adapter->native->err_virq = 0; in cxl_native_release_psl_err_irq()
1369 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", in cxl_native_register_serr_irq()
1370 dev_name(&afu->dev)); in cxl_native_register_serr_irq()
1371 if (!afu->err_irq_name) in cxl_native_register_serr_irq()
1372 return -ENOMEM; in cxl_native_register_serr_irq()
1374 if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, in cxl_native_register_serr_irq()
1375 &afu->serr_hwirq, in cxl_native_register_serr_irq()
1376 &afu->serr_virq, afu->err_irq_name))) { in cxl_native_register_serr_irq()
1377 kfree(afu->err_irq_name); in cxl_native_register_serr_irq()
1378 afu->err_irq_name = NULL; in cxl_native_register_serr_irq()
1384 serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); in cxl_native_register_serr_irq()
1390 serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff); in cxl_native_register_serr_irq()
1399 if (afu->serr_virq == 0 || in cxl_native_release_serr_irq()
1400 afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) in cxl_native_release_serr_irq()
1404 cxl_unmap_irq(afu->serr_virq, afu); in cxl_native_release_serr_irq()
1405 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); in cxl_native_release_serr_irq()
1406 kfree(afu->err_irq_name); in cxl_native_release_serr_irq()
1407 afu->serr_virq = 0; in cxl_native_release_serr_irq()
1414 afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", in cxl_native_register_psl_irq()
1415 dev_name(&afu->dev)); in cxl_native_register_psl_irq()
1416 if (!afu->psl_irq_name) in cxl_native_register_psl_irq()
1417 return -ENOMEM; in cxl_native_register_psl_irq()
1419 if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, in cxl_native_register_psl_irq()
1420 afu, &afu->native->psl_hwirq, &afu->native->psl_virq, in cxl_native_register_psl_irq()
1421 afu->psl_irq_name))) { in cxl_native_register_psl_irq()
1422 kfree(afu->psl_irq_name); in cxl_native_register_psl_irq()
1423 afu->psl_irq_name = NULL; in cxl_native_register_psl_irq()
1430 if (afu->native->psl_virq == 0 || in cxl_native_release_psl_irq()
1431 afu->native->psl_virq != in cxl_native_release_psl_irq()
1432 irq_find_mapping(NULL, afu->native->psl_hwirq)) in cxl_native_release_psl_irq()
1435 cxl_unmap_irq(afu->native->psl_virq, afu); in cxl_native_release_psl_irq()
1436 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); in cxl_native_release_psl_irq()
1437 kfree(afu->psl_irq_name); in cxl_native_release_psl_irq()
1438 afu->native->psl_virq = 0; in cxl_native_release_psl_irq()
1459 cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); in native_ack_irq()
1461 recover_psl_err(ctx->afu, psl_reset_mask); in native_ack_irq()
1479 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) in native_afu_cr_read64()
1480 return -EIO; in native_afu_cr_read64()
1481 if (unlikely(off >= afu->crs_len)) in native_afu_cr_read64()
1482 return -ERANGE; in native_afu_cr_read64()
1483 *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + in native_afu_cr_read64()
1484 (cr * afu->crs_len) + off); in native_afu_cr_read64()
1490 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) in native_afu_cr_read32()
1491 return -EIO; in native_afu_cr_read32()
1492 if (unlikely(off >= afu->crs_len)) in native_afu_cr_read32()
1493 return -ERANGE; in native_afu_cr_read32()
1494 *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + in native_afu_cr_read32()
1495 (cr * afu->crs_len) + off); in native_afu_cr_read32()
1525 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) in native_afu_cr_write32()
1526 return -EIO; in native_afu_cr_write32()
1527 if (unlikely(off >= afu->crs_len)) in native_afu_cr_write32()
1528 return -ERANGE; in native_afu_cr_write32()
1529 out_le32(afu->native->afu_desc_mmio + afu->crs_offset + in native_afu_cr_write32()
1530 (cr * afu->crs_len) + off, in); in native_afu_cr_write32()