Lines matching refs: afu
22 static void pci_error_handlers(struct cxl_afu *afu, in pci_error_handlers() argument
28 if (afu->phb == NULL) in pci_error_handlers()
31 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in pci_error_handlers()
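This listing looks like an identifier cross-reference for afu over the cxl guest driver (most likely drivers/misc/cxl/guest.c). From the three fragments above, pci_error_handlers() walks every PCI device hanging off the AFU's virtual PHB and forwards the given event to that device's driver error handlers. The sketch below is a reconstruction under that assumption; the switch body and the err_handler checks are filled in, not quoted from the source.

    static void pci_error_handlers(struct cxl_afu *afu,
                                   int bus_error_event,
                                   pci_channel_state_t state)
    {
        struct pci_dev *afu_dev;

        if (afu->phb == NULL)
            return;

        list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
            if (!afu_dev->driver)
                continue;

            switch (bus_error_event) {
            case CXL_ERROR_DETECTED_EVENT:
                afu_dev->error_state = state;
                if (afu_dev->driver->err_handler &&
                    afu_dev->driver->err_handler->error_detected)
                    afu_dev->driver->err_handler->error_detected(afu_dev, state);
                break;
            case CXL_SLOT_RESET_EVENT:
                if (afu_dev->driver->err_handler &&
                    afu_dev->driver->err_handler->slot_reset)
                    afu_dev->driver->err_handler->slot_reset(afu_dev);
                break;
            case CXL_RESUME_EVENT:
                if (afu_dev->driver->err_handler &&
                    afu_dev->driver->err_handler->resume)
                    afu_dev->driver->err_handler->resume(afu_dev);
                break;
            }
        }
    }

The callers visible further down (guest_reset() at lines 272-283 and afu_update_state() at lines 845-867) pass exactly these three event types, which is what the switch above is modelled on.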
63 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); in guest_handle_psl_slice_error()
68 static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, in guest_collect_vpd() argument
118 rc = cxl_h_collect_vpd(afu->guest->handle, 0, in guest_collect_vpd()
156 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); in guest_get_irq_info()
176 static int afu_read_error_state(struct cxl_afu *afu, int *state_out) in afu_read_error_state() argument
181 if (!afu) in afu_read_error_state()
184 rc = cxl_h_read_error_state(afu->guest->handle, &state); in afu_read_error_state()
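The fragments above suggest afu_read_error_state() is a thin wrapper around the cxl_h_read_error_state() hcall. A minimal sketch, assuming the hcall fills in a 64-bit state value and that the error code for a missing AFU is -EIO:

    static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
    {
        u64 state;
        int rc;

        if (!afu)
            return -EIO;    /* assumption: error code for a missing AFU */

        rc = cxl_h_read_error_state(afu->guest->handle, &state);
        if (!rc)
            *state_out = state & 0xffffffff;    /* low word carries the state value */
        return rc;
    }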
197 struct cxl_afu *afu = data; in guest_slice_irq_err() local
201 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); in guest_slice_irq_err()
203 dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); in guest_slice_irq_err()
206 afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); in guest_slice_irq_err()
207 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); in guest_slice_irq_err()
208 cxl_afu_decode_psl_serr(afu, serr); in guest_slice_irq_err()
209 dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); in guest_slice_irq_err()
210 dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); in guest_slice_irq_err()
212 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); in guest_slice_irq_err()
214 dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", in guest_slice_irq_err()
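guest_slice_irq_err() is the slice error interrupt handler registered by guest_register_serr_irq() below. Reassembled from the matched lines, with only the handler prototype, local declarations, and return values assumed:

    static irqreturn_t guest_slice_irq_err(int irq, void *data)
    {
        struct cxl_afu *afu = data;
        u64 serr, afu_error, dsisr;
        int rc;

        rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
        if (rc) {
            dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
            return IRQ_HANDLED;
        }

        /* dump the slice error state before acknowledging it */
        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_afu_decode_psl_serr(afu, serr);
        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

        rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
        if (rc)
            dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
                     rc);

        return IRQ_HANDLED;
    }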
266 struct cxl_afu *afu = NULL; in guest_reset() local
271 if ((afu = adapter->afu[i])) { in guest_reset()
272 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in guest_reset()
274 cxl_context_detach_all(afu); in guest_reset()
280 if (!rc && (afu = adapter->afu[i])) { in guest_reset()
281 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, in guest_reset()
283 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); in guest_reset()
351 static int guest_register_serr_irq(struct cxl_afu *afu) in guest_register_serr_irq() argument
353 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", in guest_register_serr_irq()
354 dev_name(&afu->dev)); in guest_register_serr_irq()
355 if (!afu->err_irq_name) in guest_register_serr_irq()
358 if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, in guest_register_serr_irq()
359 guest_slice_irq_err, afu, afu->err_irq_name))) { in guest_register_serr_irq()
360 kfree(afu->err_irq_name); in guest_register_serr_irq()
361 afu->err_irq_name = NULL; in guest_register_serr_irq()
368 static void guest_release_serr_irq(struct cxl_afu *afu) in guest_release_serr_irq() argument
370 cxl_unmap_irq(afu->serr_virq, afu); in guest_release_serr_irq()
371 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); in guest_release_serr_irq()
372 kfree(afu->err_irq_name); in guest_release_serr_irq()
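The registration/teardown pair for that handler is almost fully visible in the matches; only the return values are filled in here (assumed -ENOMEM on both failure paths):

    static int guest_register_serr_irq(struct cxl_afu *afu)
    {
        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
            return -ENOMEM;

        if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
                        guest_slice_irq_err, afu, afu->err_irq_name))) {
            kfree(afu->err_irq_name);
            afu->err_irq_name = NULL;
            return -ENOMEM;
        }

        return 0;
    }

    static void guest_release_serr_irq(struct cxl_afu *afu)
    {
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
    }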
377 return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, in guest_ack_irq()
387 pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); in disable_afu_irqs()
403 pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); in enable_afu_irqs()
413 static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, in _guest_afu_cr_readXX() argument
420 if (afu->crs_len < sz) in _guest_afu_cr_readXX()
423 if (unlikely(offset >= afu->crs_len)) in _guest_afu_cr_readXX()
430 rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, in _guest_afu_cr_readXX()
457 static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read32() argument
463 rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); in guest_afu_cr_read32()
469 static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read16() argument
475 rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); in guest_afu_cr_read16()
481 static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read8() argument
487 rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); in guest_afu_cr_read8()
493 static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, in guest_afu_cr_read64() argument
496 return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); in guest_afu_cr_read64()
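The config-record readers all funnel into _guest_afu_cr_readXX(), which bounds-checks against afu->crs_len and asks the hypervisor for the bytes via cxl_h_get_config(). The sketch below uses a zeroed bounce page and a single little-endian fixup where the real helper presumably uses per-size accessors; guest_afu_cr_read32() mirrors the matched wrapper, and the 8/16/64-bit variants differ only in the size argument and cast.

    static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
                                    u64 offset, u64 *val)
    {
        unsigned long cr;
        __le64 raw = 0;
        int rc;

        if (afu->crs_len < sz)
            return -ENOENT;

        if (unlikely(offset >= afu->crs_len))
            return -ERANGE;

        cr = get_zeroed_page(GFP_KERNEL);    /* bounce buffer for the hcall */
        if (!cr)
            return -ENOMEM;

        rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
                              virt_to_phys((void *)cr), sz);
        if (!rc) {
            /* config records follow PCI config space, i.e. little-endian */
            memcpy(&raw, (void *)cr, sz);
            *val = le64_to_cpu(raw);
        }
        free_page(cr);
        return rc;
    }

    static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
                                   u32 *out)
    {
        u64 val;
        int rc;

        rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
        if (!rc)
            *out = (u32) val;
        return rc;
    }

The corresponding guest_afu_cr_write32/16/8 entries at lines 499-511 take the same (afu, cr, off, in) shape but presumably just refuse the write, since the guest has no hypervisor interface for pushing config-record changes.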
499 static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) in guest_afu_cr_write32() argument
505 static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) in guest_afu_cr_write16() argument
511 static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) in guest_afu_cr_write8() argument
520 struct cxl *adapter = ctx->afu->adapter; in attach_afu_directed()
587 rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, in attach_afu_directed()
590 if (ctx->master || !ctx->afu->pp_psa) { in attach_afu_directed()
591 ctx->psn_phys = ctx->afu->psn_phys; in attach_afu_directed()
592 ctx->psn_size = ctx->afu->adapter->ps_size; in attach_afu_directed()
597 if (ctx->afu->pp_psa && mmio_size && in attach_afu_directed()
598 ctx->afu->pp_size == 0) { in attach_afu_directed()
607 ctx->afu->pp_size = mmio_size; in attach_afu_directed()
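attach_afu_directed() builds a process element ('elem', not shown in the matches) and hands it to the hypervisor; the part visible here is what happens to the problem-state mapping afterwards. A sketch of that tail, where mmio_addr/mmio_size are assumed to be out-parameters of cxl_h_attach_process(), the H_SUCCESS check is an assumption, and local declarations are omitted:

    rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
                              &ctx->process_token, &mmio_addr, &mmio_size);
    if (rc == H_SUCCESS) {
        if (ctx->master || !ctx->afu->pp_psa) {
            /* master contexts (or AFUs without a per-process area)
             * map the whole problem-state area */
            ctx->psn_phys = ctx->afu->psn_phys;
            ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
            /* ordinary contexts use the window the hypervisor handed back */
            ctx->psn_phys = mmio_addr;
            ctx->psn_size = mmio_size;
        }
        if (ctx->afu->pp_psa && mmio_size && ctx->afu->pp_size == 0)
            /* the per-process area size is only learned on first attach */
            ctx->afu->pp_size = mmio_size;
    }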
630 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) in guest_attach_process()
642 if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) in detach_afu_directed()
652 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) in guest_detach_process()
655 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) in guest_detach_process()
663 struct cxl_afu *afu = to_cxl_afu(dev); in guest_release_afu() local
667 idr_destroy(&afu->contexts_idr); in guest_release_afu()
669 kfree(afu->guest); in guest_release_afu()
670 kfree(afu); in guest_release_afu()
673 ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) in cxl_guest_read_afu_vpd() argument
675 return guest_collect_vpd(NULL, afu, buf, len); in cxl_guest_read_afu_vpd()
679 static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, in guest_afu_read_err_buffer() argument
689 rc = cxl_h_get_afu_err(afu->guest->handle, in guest_afu_read_err_buffer()
705 static int guest_afu_check_and_enable(struct cxl_afu *afu) in guest_afu_check_and_enable() argument
734 static int activate_afu_directed(struct cxl_afu *afu) in activate_afu_directed() argument
738 dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); in activate_afu_directed()
740 afu->current_mode = CXL_MODE_DIRECTED; in activate_afu_directed()
742 afu->num_procs = afu->max_procs_virtualised; in activate_afu_directed()
744 if ((rc = cxl_chardev_m_afu_add(afu))) in activate_afu_directed()
747 if ((rc = cxl_sysfs_afu_m_add(afu))) in activate_afu_directed()
750 if ((rc = cxl_chardev_s_afu_add(afu))) in activate_afu_directed()
755 cxl_sysfs_afu_m_remove(afu); in activate_afu_directed()
757 cxl_chardev_afu_remove(afu); in activate_afu_directed()
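activate_afu_directed() switches the slice into directed mode and then registers the master/slave char devices and sysfs entries, unwinding in reverse on failure. The goto labels below are reconstructed from the ordering of the matched error-path calls at lines 755-757:

    static int activate_afu_directed(struct cxl_afu *afu)
    {
        int rc;

        dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

        afu->current_mode = CXL_MODE_DIRECTED;
        afu->num_procs = afu->max_procs_virtualised;

        if ((rc = cxl_chardev_m_afu_add(afu)))
            return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
            goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
            goto err1;

        return 0;
    err1:
        cxl_sysfs_afu_m_remove(afu);
    err:
        cxl_chardev_afu_remove(afu);
        return rc;
    }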
761 static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) in guest_afu_activate_mode() argument
765 if (!(mode & afu->modes_supported)) in guest_afu_activate_mode()
769 return activate_afu_directed(afu); in guest_afu_activate_mode()
772 dev_err(&afu->dev, "Dedicated mode not supported\n"); in guest_afu_activate_mode()
777 static int deactivate_afu_directed(struct cxl_afu *afu) in deactivate_afu_directed() argument
779 dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); in deactivate_afu_directed()
781 afu->current_mode = 0; in deactivate_afu_directed()
782 afu->num_procs = 0; in deactivate_afu_directed()
784 cxl_sysfs_afu_m_remove(afu); in deactivate_afu_directed()
785 cxl_chardev_afu_remove(afu); in deactivate_afu_directed()
787 cxl_ops->afu_reset(afu); in deactivate_afu_directed()
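The teardown counterpart clears the mode bookkeeping, removes the char/sysfs entries, and resets the AFU; only the trailing return is assumed:

    static int deactivate_afu_directed(struct cxl_afu *afu)
    {
        dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        cxl_ops->afu_reset(afu);

        return 0;
    }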
792 static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) in guest_afu_deactivate_mode() argument
796 if (!(mode & afu->modes_supported)) in guest_afu_deactivate_mode()
800 return deactivate_afu_directed(afu); in guest_afu_deactivate_mode()
804 static int guest_afu_reset(struct cxl_afu *afu) in guest_afu_reset() argument
806 pr_devel("AFU(%d) reset request\n", afu->slice); in guest_afu_reset()
807 return cxl_h_reset_afu(afu->guest->handle); in guest_afu_reset()
810 static int guest_map_slice_regs(struct cxl_afu *afu) in guest_map_slice_regs() argument
812 if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { in guest_map_slice_regs()
813 dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", in guest_map_slice_regs()
814 afu->slice); in guest_map_slice_regs()
820 static void guest_unmap_slice_regs(struct cxl_afu *afu) in guest_unmap_slice_regs() argument
822 if (afu->p2n_mmio) in guest_unmap_slice_regs()
823 iounmap(afu->p2n_mmio); in guest_unmap_slice_regs()
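The slice register window is a straight ioremap() of the p2n range advertised to the guest, with iounmap() guarded by the pointer check seen above; the -ENOMEM on mapping failure is an assumption:

    static int guest_map_slice_regs(struct cxl_afu *afu)
    {
        afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size);
        if (!afu->p2n_mmio) {
            dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
                    afu->slice);
            return -ENOMEM;
        }
        return 0;
    }

    static void guest_unmap_slice_regs(struct cxl_afu *afu)
    {
        if (afu->p2n_mmio)
            iounmap(afu->p2n_mmio);
    }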
826 static int afu_update_state(struct cxl_afu *afu) in afu_update_state() argument
830 rc = afu_read_error_state(afu, &cur_state); in afu_update_state()
834 if (afu->guest->previous_state == cur_state) in afu_update_state()
837 pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); in afu_update_state()
841 afu->guest->previous_state = cur_state; in afu_update_state()
845 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in afu_update_state()
848 cxl_context_detach_all(afu); in afu_update_state()
849 if ((rc = cxl_ops->afu_reset(afu))) in afu_update_state()
852 rc = afu_read_error_state(afu, &cur_state); in afu_update_state()
854 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, in afu_update_state()
856 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); in afu_update_state()
858 afu->guest->previous_state = 0; in afu_update_state()
862 afu->guest->previous_state = cur_state; in afu_update_state()
866 dev_err(&afu->dev, "AFU is in permanent error state\n"); in afu_update_state()
867 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, in afu_update_state()
869 afu->guest->previous_state = cur_state; in afu_update_state()
874 afu->slice, cur_state); in afu_update_state()
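afu_update_state() is the heart of the guest error handling: it polls the hypervisor-reported AFU state via afu_read_error_state() and, on a transition, drives the same CXL_*_EVENT notifications used by guest_reset(). It is presumably called from the afu_handle_errstate delayed work scheduled at init (lines 1003-1004). The switch below is reconstructed around the matched lines, assuming the H_STATE_* names come from the driver's hcall definitions:

    static int afu_update_state(struct cxl_afu *afu)
    {
        int rc, cur_state;

        rc = afu_read_error_state(afu, &cur_state);
        if (rc)
            return rc;

        if (afu->guest->previous_state == cur_state)
            return 0;

        pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

        switch (cur_state) {
        case H_STATE_NORMAL:
            afu->guest->previous_state = cur_state;
            break;

        case H_STATE_DISABLE:
            /* freeze, detach all contexts, reset, then re-check the state */
            pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                               pci_channel_io_frozen);

            cxl_context_detach_all(afu);
            if ((rc = cxl_ops->afu_reset(afu)))
                pr_devel("reset hcall failed %d\n", rc);

            rc = afu_read_error_state(afu, &cur_state);
            if (!rc && cur_state == H_STATE_NORMAL) {
                pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
                                   pci_channel_io_normal);
                pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
            }
            afu->guest->previous_state = 0;
            break;

        case H_STATE_TEMP_UNAVAILABLE:
            afu->guest->previous_state = cur_state;
            break;

        case H_STATE_PERM_UNAVAILABLE:
            dev_err(&afu->dev, "AFU is in permanent error state\n");
            pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                               pci_channel_io_perm_failure);
            afu->guest->previous_state = cur_state;
            break;

        default:
            pr_err("Unexpected AFU(%d) error state: %#x\n",
                   afu->slice, cur_state);
            return -EINVAL;
        }

        return rc;
    }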
895 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) in guest_link_ok() argument
899 if (afu && (!afu_read_error_state(afu, &state))) { in guest_link_ok()
907 static int afu_properties_look_ok(struct cxl_afu *afu) in afu_properties_look_ok() argument
909 if (afu->pp_irqs < 0) { in afu_properties_look_ok()
910 dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); in afu_properties_look_ok()
914 if (afu->max_procs_virtualised < 1) { in afu_properties_look_ok()
915 dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); in afu_properties_look_ok()
919 if (afu->crs_len < 0) { in afu_properties_look_ok()
920 dev_err(&afu->dev, "Unexpected configuration record size value\n"); in afu_properties_look_ok()
929 struct cxl_afu *afu; in cxl_guest_init_afu() local
934 if (!(afu = cxl_alloc_afu(adapter, slice))) in cxl_guest_init_afu()
937 if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { in cxl_guest_init_afu()
938 kfree(afu); in cxl_guest_init_afu()
942 if ((rc = dev_set_name(&afu->dev, "afu%i.%i", in cxl_guest_init_afu()
949 if ((rc = cxl_of_read_afu_handle(afu, afu_np))) in cxl_guest_init_afu()
952 if ((rc = cxl_ops->afu_reset(afu))) in cxl_guest_init_afu()
955 if ((rc = cxl_of_read_afu_properties(afu, afu_np))) in cxl_guest_init_afu()
958 if ((rc = afu_properties_look_ok(afu))) in cxl_guest_init_afu()
961 if ((rc = guest_map_slice_regs(afu))) in cxl_guest_init_afu()
964 if ((rc = guest_register_serr_irq(afu))) in cxl_guest_init_afu()
971 if ((rc = cxl_register_afu(afu))) in cxl_guest_init_afu()
974 if ((rc = cxl_sysfs_afu_add(afu))) in cxl_guest_init_afu()
985 if (afu->max_procs_virtualised == 1) in cxl_guest_init_afu()
986 afu->modes_supported = CXL_MODE_DEDICATED; in cxl_guest_init_afu()
988 afu->modes_supported = CXL_MODE_DIRECTED; in cxl_guest_init_afu()
990 if ((rc = cxl_afu_select_best_mode(afu))) in cxl_guest_init_afu()
993 adapter->afu[afu->slice] = afu; in cxl_guest_init_afu()
995 afu->enabled = true; in cxl_guest_init_afu()
1001 afu->guest->parent = afu; in cxl_guest_init_afu()
1002 afu->guest->handle_err = true; in cxl_guest_init_afu()
1003 INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); in cxl_guest_init_afu()
1004 schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); in cxl_guest_init_afu()
1006 if ((rc = cxl_pci_vphb_add(afu))) in cxl_guest_init_afu()
1007 dev_info(&afu->dev, "Can't register vPHB\n"); in cxl_guest_init_afu()
1012 cxl_sysfs_afu_remove(afu); in cxl_guest_init_afu()
1014 device_unregister(&afu->dev); in cxl_guest_init_afu()
1016 guest_release_serr_irq(afu); in cxl_guest_init_afu()
1018 guest_unmap_slice_regs(afu); in cxl_guest_init_afu()
1021 kfree(afu->guest); in cxl_guest_init_afu()
1022 kfree(afu); in cxl_guest_init_afu()
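The matched lines from cxl_guest_init_afu() give the setup order and, via lines 1012-1022, the unwind order. Below is a condensed sketch of the flow; the goto labels and the 'free' bookkeeping are assumed, the key rule being that once cxl_register_afu() has been called the afu must be released through device_unregister() (which ends in guest_release_afu(), lines 663-670) rather than kfree()d directly:

    int cxl_guest_init_afu(struct cxl *adapter, int slice,
                           struct device_node *afu_np)
    {
        struct cxl_afu *afu;
        bool free = true;
        int rc;

        if (!(afu = cxl_alloc_afu(adapter, slice)))
            return -ENOMEM;

        if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
            kfree(afu);
            return -ENOMEM;
        }

        if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
                               adapter->adapter_num, slice)))
            goto err1;

        /* device-tree handle and properties, reset, then sanity checks */
        if ((rc = cxl_of_read_afu_handle(afu, afu_np)) ||
            (rc = cxl_ops->afu_reset(afu)) ||
            (rc = cxl_of_read_afu_properties(afu, afu_np)) ||
            (rc = afu_properties_look_ok(afu)))
            goto err1;

        if ((rc = guest_map_slice_regs(afu)))
            goto err1;

        if ((rc = guest_register_serr_irq(afu)))
            goto err2;

        /* from here on, the device core owns the afu lifetime */
        if ((rc = cxl_register_afu(afu)))
            goto err_put1;

        if ((rc = cxl_sysfs_afu_add(afu)))
            goto err_put1;

        /* only a single-process AFU is treated as dedicated-mode capable */
        if (afu->max_procs_virtualised == 1)
            afu->modes_supported = CXL_MODE_DEDICATED;
        else
            afu->modes_supported = CXL_MODE_DIRECTED;

        if ((rc = cxl_afu_select_best_mode(afu)))
            goto err_put2;

        adapter->afu[afu->slice] = afu;
        afu->enabled = true;

        /* start the error-state poller */
        afu->guest->parent = afu;
        afu->guest->handle_err = true;
        INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
        schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

        if ((rc = cxl_pci_vphb_add(afu)))
            dev_info(&afu->dev, "Can't register vPHB\n");

        return 0;

    err_put2:
        cxl_sysfs_afu_remove(afu);
    err_put1:
        device_unregister(&afu->dev);
        free = false;            /* release now goes via guest_release_afu() */
        guest_release_serr_irq(afu);
    err2:
        guest_unmap_slice_regs(afu);
    err1:
        if (free) {
            kfree(afu->guest);
            kfree(afu);
        }
        return rc;
    }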
1027 void cxl_guest_remove_afu(struct cxl_afu *afu) in cxl_guest_remove_afu() argument
1029 pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); in cxl_guest_remove_afu()
1031 if (!afu) in cxl_guest_remove_afu()
1035 afu->guest->handle_err = false; in cxl_guest_remove_afu()
1036 flush_delayed_work(&afu->guest->work_err); in cxl_guest_remove_afu()
1038 cxl_pci_vphb_remove(afu); in cxl_guest_remove_afu()
1039 cxl_sysfs_afu_remove(afu); in cxl_guest_remove_afu()
1041 spin_lock(&afu->adapter->afu_list_lock); in cxl_guest_remove_afu()
1042 afu->adapter->afu[afu->slice] = NULL; in cxl_guest_remove_afu()
1043 spin_unlock(&afu->adapter->afu_list_lock); in cxl_guest_remove_afu()
1045 cxl_context_detach_all(afu); in cxl_guest_remove_afu()
1046 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_guest_remove_afu()
1047 guest_release_serr_irq(afu); in cxl_guest_remove_afu()
1048 guest_unmap_slice_regs(afu); in cxl_guest_remove_afu()
1050 device_unregister(&afu->dev); in cxl_guest_remove_afu()
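Removal is essentially the init sequence in reverse: stop the error poller, drop the vPHB and sysfs entries, unhook the slice from the adapter under afu_list_lock, detach contexts, deactivate the current mode, release the serr IRQ and MMIO, then unregister the device. A sketch following the matched ordering (note that in the source the debug print at line 1029 dereferences afu before the NULL check at line 1031; the sketch orders the check first):

    void cxl_guest_remove_afu(struct cxl_afu *afu)
    {
        if (!afu)
            return;

        pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

        /* stop and flush the pending error-state work */
        afu->guest->handle_err = false;
        flush_delayed_work(&afu->guest->work_err);

        cxl_pci_vphb_remove(afu);
        cxl_sysfs_afu_remove(afu);

        spin_lock(&afu->adapter->afu_list_lock);
        afu->adapter->afu[afu->slice] = NULL;
        spin_unlock(&afu->adapter->afu_list_lock);

        cxl_context_detach_all(afu);
        cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
        guest_release_serr_irq(afu);
        guest_unmap_slice_regs(afu);

        device_unregister(&afu->dev);
    }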