Lines Matching +full:dt +full:- +full:node (drivers/perf/arm-ccn.c)

119 #define CCN_TYPE_RND_1P	0x18 /* RN-D = RN-I + DVM */
127 #define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
128 #define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
150 struct arm_ccn_dt, pmu), struct arm_ccn, dt) /* tail of the pmu_to_arm_ccn() container_of() chain */
171 struct hlist_node node; member
185 struct arm_ccn_component *node; member
190 struct arm_ccn_dt dt; member
194 static int arm_ccn_node_to_xp(int node) in arm_ccn_node_to_xp() argument
196 return node / CCN_NUM_XP_PORTS; in arm_ccn_node_to_xp()
199 static int arm_ccn_node_to_xp_port(int node) in arm_ccn_node_to_xp_port() argument
201 return node % CCN_NUM_XP_PORTS; in arm_ccn_node_to_xp_port()
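
Each crosspoint (XP) drives CCN_NUM_XP_PORTS device ports, so a node ID maps to its XP by division and to its port by the remainder. A minimal standalone sketch of that arithmetic, assuming the driver's CCN_NUM_XP_PORTS value of 2 (the #define itself is not among these matches):

#include <assert.h>

#define CCN_NUM_XP_PORTS 2	/* assumed, per the driver */

int main(void)
{
	/* node 5 sits behind XP 2, port 1 */
	assert(5 / CCN_NUM_XP_PORTS == 2);	/* arm_ccn_node_to_xp(5) */
	assert(5 % CCN_NUM_XP_PORTS == 1);	/* arm_ccn_node_to_xp_port(5) */
	return 0;
}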
231 return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); in arm_ccn_pmu_format_show()
239 static CCN_FORMAT_ATTR(node, "config:0-7");
240 static CCN_FORMAT_ATTR(xp, "config:0-7");
241 static CCN_FORMAT_ATTR(type, "config:8-15");
242 static CCN_FORMAT_ATTR(event, "config:16-23");
243 static CCN_FORMAT_ATTR(port, "config:24-25");
244 static CCN_FORMAT_ATTR(bus, "config:24-25");
245 static CCN_FORMAT_ATTR(vc, "config:26-28");
246 static CCN_FORMAT_ATTR(dir, "config:29-29");
247 static CCN_FORMAT_ATTR(mask, "config:30-33");
248 static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
249 static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
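
Together these format attributes define the layout of perf_event_attr::config: node/xp in bits 0-7, type in 8-15, event in 16-23, port or bus in 24-25, vc in 26-28, dir in bit 29 and mask in 30-33, with the watchpoint compare words spilling into config1/config2. A standalone sketch of packing a config value per that layout (ccn_pack() is a hypothetical helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring a "config:lo-hi" field above */
static uint64_t ccn_pack(uint64_t val, int lo, int hi)
{
	return (val & ((1ULL << (hi - lo + 1)) - 1)) << lo;
}

int main(void)
{
	/* node=4, type=0x8, event=0x1 */
	uint64_t config = ccn_pack(4, 0, 7) | ccn_pack(0x8, 8, 15) |
			  ccn_pack(0x1, 16, 23);

	printf("config=0x%llx\n", (unsigned long long)config);	/* 0x10804 */
	return 0;
}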
286 * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
316 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
338 res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); in arm_ccn_pmu_event_show()
339 if (event->event) in arm_ccn_pmu_event_show()
340 res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", in arm_ccn_pmu_event_show()
341 event->event); in arm_ccn_pmu_event_show()
342 if (event->def) in arm_ccn_pmu_event_show()
343 res += snprintf(buf + res, PAGE_SIZE - res, ",%s", in arm_ccn_pmu_event_show()
344 event->def); in arm_ccn_pmu_event_show()
345 if (event->mask) in arm_ccn_pmu_event_show()
346 res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", in arm_ccn_pmu_event_show()
347 event->mask); in arm_ccn_pmu_event_show()
350 switch (event->type) { in arm_ccn_pmu_event_show()
354 res += snprintf(buf + res, PAGE_SIZE - res, in arm_ccn_pmu_event_show()
356 if (event->event == CCN_EVENT_WATCHPOINT) in arm_ccn_pmu_event_show()
357 res += snprintf(buf + res, PAGE_SIZE - res, in arm_ccn_pmu_event_show()
360 res += snprintf(buf + res, PAGE_SIZE - res, in arm_ccn_pmu_event_show()
365 res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); in arm_ccn_pmu_event_show()
368 res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); in arm_ccn_pmu_event_show()
372 res += snprintf(buf + res, PAGE_SIZE - res, "\n"); in arm_ccn_pmu_event_show()
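
The string assembled here lands in the PMU's sysfs events directory, and the perf tool consumes it verbatim as an event template, substituting the "?" placeholders from the command line. An illustrative invocation built from the fields above (the event name is an example; the full events table is not part of these matches):

perf stat -a -e ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=0/ sleep 1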
387 if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) in arm_ccn_pmu_events_is_visible()
389 if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) in arm_ccn_pmu_events_is_visible()
392 return attr->mode; in arm_ccn_pmu_events_is_visible()
468 i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a'; in arm_ccn_pmu_get_cmp_mask()
472 return &ccn->dt.cmp_mask[i].l; in arm_ccn_pmu_get_cmp_mask()
474 return &ccn->dt.cmp_mask[i].h; in arm_ccn_pmu_get_cmp_mask()
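
The comparator-mask attributes are named with a hex digit plus 'l' or 'h' (e.g. "3l", "bh"), and the expression above turns that first character back into an index into cmp_mask[]. A standalone check of the same expression:

#include <ctype.h>
#include <assert.h>

/* Same indexing as arm_ccn_pmu_get_cmp_mask() above */
static int mask_index(const char *name)
{
	return isdigit(name[0]) ? name[0] - '0'
				: 0xa + tolower(name[0]) - 'a';
}

int main(void)
{
	assert(mask_index("3l") == 3);		/* cmp_mask[3].l */
	assert(mask_index("bh") == 0xb);	/* cmp_mask[0xb].h */
	return 0;
}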
484 u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); in arm_ccn_pmu_cmp_mask_show()
486 return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL; in arm_ccn_pmu_cmp_mask_show()
493 u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); in arm_ccn_pmu_cmp_mask_store()
494 int err = -EINVAL; in arm_ccn_pmu_cmp_mask_store()
562 return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); in arm_ccn_pmu_cpumask_show()
609 return -EAGAIN; in arm_ccn_pmu_alloc_bit()
615 /* All RN-I and RN-D nodes have identical PMUs */
645 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_alloc()
646 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_alloc()
651 node_xp = CCN_CONFIG_NODE(event->attr.config); in arm_ccn_pmu_event_alloc()
652 type = CCN_CONFIG_TYPE(event->attr.config); in arm_ccn_pmu_event_alloc()
653 event_id = CCN_CONFIG_EVENT(event->attr.config); in arm_ccn_pmu_event_alloc()
658 ccn->dt.pmu_counters_mask)) in arm_ccn_pmu_event_alloc()
659 return -EAGAIN; in arm_ccn_pmu_event_alloc()
661 hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; in arm_ccn_pmu_event_alloc()
662 ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; in arm_ccn_pmu_event_alloc()
668 hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, in arm_ccn_pmu_event_alloc()
670 if (hw->idx < 0) { in arm_ccn_pmu_event_alloc()
671 dev_dbg(ccn->dev, "No more counters available!\n"); in arm_ccn_pmu_event_alloc()
672 return -EAGAIN; in arm_ccn_pmu_event_alloc()
676 source = &ccn->xp[node_xp]; in arm_ccn_pmu_event_alloc()
678 source = &ccn->node[node_xp]; in arm_ccn_pmu_event_alloc()
679 ccn->dt.pmu_counters[hw->idx].source = source; in arm_ccn_pmu_event_alloc()
683 bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, in arm_ccn_pmu_event_alloc()
686 bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, in arm_ccn_pmu_event_alloc()
689 dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", in arm_ccn_pmu_event_alloc()
691 clear_bit(hw->idx, ccn->dt.pmu_counters_mask); in arm_ccn_pmu_event_alloc()
692 return -EAGAIN; in arm_ccn_pmu_event_alloc()
694 hw->config_base = bit; in arm_ccn_pmu_event_alloc()
696 ccn->dt.pmu_counters[hw->idx].event = event; in arm_ccn_pmu_event_alloc()
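
hw->idx and the per-XP event/watchpoint slot are both carved out of bitmaps, and the bare return -EAGAIN at line 609 above is the failure path of that allocator. A plausible reconstruction of the find-and-set loop behind it, inferred from the calls visible here (find_first_zero_bit()/test_and_set_bit() are the standard kernel bitmap API):

static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, int size)
{
	int bit;

	/* Retry until test_and_set_bit() confirms we won the bit */
	do {
		bit = find_first_zero_bit(bitmap, size);
		if (bit >= size)
			return -EAGAIN;
	} while (test_and_set_bit(bit, bitmap));

	return bit;
}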
703 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_release()
704 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_release()
706 if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { in arm_ccn_pmu_event_release()
707 clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); in arm_ccn_pmu_event_release()
710 ccn->dt.pmu_counters[hw->idx].source; in arm_ccn_pmu_event_release()
712 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && in arm_ccn_pmu_event_release()
713 CCN_CONFIG_EVENT(event->attr.config) == in arm_ccn_pmu_event_release()
715 clear_bit(hw->config_base, source->xp.dt_cmp_mask); in arm_ccn_pmu_event_release()
717 clear_bit(hw->config_base, source->pmu_events_mask); in arm_ccn_pmu_event_release()
718 clear_bit(hw->idx, ccn->dt.pmu_counters_mask); in arm_ccn_pmu_event_release()
721 ccn->dt.pmu_counters[hw->idx].source = NULL; in arm_ccn_pmu_event_release()
722 ccn->dt.pmu_counters[hw->idx].event = NULL; in arm_ccn_pmu_event_release()
728 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_init()
734 if (event->attr.type != event->pmu->type) in arm_ccn_pmu_event_init()
735 return -ENOENT; in arm_ccn_pmu_event_init()
737 ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_init()
739 if (hw->sample_period) { in arm_ccn_pmu_event_init()
740 dev_dbg(ccn->dev, "Sampling not supported!\n"); in arm_ccn_pmu_event_init()
741 return -EOPNOTSUPP; in arm_ccn_pmu_event_init()
744 if (has_branch_stack(event) || event->attr.exclude_user || in arm_ccn_pmu_event_init()
745 event->attr.exclude_kernel || event->attr.exclude_hv || in arm_ccn_pmu_event_init()
746 event->attr.exclude_idle || event->attr.exclude_host || in arm_ccn_pmu_event_init()
747 event->attr.exclude_guest) { in arm_ccn_pmu_event_init()
748 dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); in arm_ccn_pmu_event_init()
749 return -EINVAL; in arm_ccn_pmu_event_init()
752 if (event->cpu < 0) { in arm_ccn_pmu_event_init()
753 dev_dbg(ccn->dev, "Can't provide per-task data!\n"); in arm_ccn_pmu_event_init()
754 return -EOPNOTSUPP; in arm_ccn_pmu_event_init()
760 * but can lead to issues for off-core PMUs, like CCN, where each in arm_ccn_pmu_event_init()
765 event->cpu = cpumask_first(&ccn->dt.cpu); in arm_ccn_pmu_event_init()
767 node_xp = CCN_CONFIG_NODE(event->attr.config); in arm_ccn_pmu_event_init()
768 type = CCN_CONFIG_TYPE(event->attr.config); in arm_ccn_pmu_event_init()
769 event_id = CCN_CONFIG_EVENT(event->attr.config); in arm_ccn_pmu_event_init()
771 /* Validate node/xp vs topology */ in arm_ccn_pmu_event_init()
774 if (node_xp != ccn->mn_id) { in arm_ccn_pmu_event_init()
775 dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp); in arm_ccn_pmu_event_init()
776 return -EINVAL; in arm_ccn_pmu_event_init()
780 if (node_xp >= ccn->num_xps) { in arm_ccn_pmu_event_init()
781 dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp); in arm_ccn_pmu_event_init()
782 return -EINVAL; in arm_ccn_pmu_event_init()
788 if (node_xp >= ccn->num_nodes) { in arm_ccn_pmu_event_init()
789 dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp); in arm_ccn_pmu_event_init()
790 return -EINVAL; in arm_ccn_pmu_event_init()
792 if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { in arm_ccn_pmu_event_init()
793 dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n", in arm_ccn_pmu_event_init()
795 return -EINVAL; in arm_ccn_pmu_event_init()
804 u32 port = CCN_CONFIG_PORT(event->attr.config); in arm_ccn_pmu_event_init()
805 u32 vc = CCN_CONFIG_VC(event->attr.config); in arm_ccn_pmu_event_init()
807 if (!arm_ccn_pmu_type_eq(type, e->type)) in arm_ccn_pmu_event_init()
809 if (event_id != e->event) in arm_ccn_pmu_event_init()
811 if (e->num_ports && port >= e->num_ports) { in arm_ccn_pmu_event_init()
812 dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n", in arm_ccn_pmu_event_init()
814 return -EINVAL; in arm_ccn_pmu_event_init()
816 if (e->num_vcs && vc >= e->num_vcs) { in arm_ccn_pmu_event_init()
817 dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n", in arm_ccn_pmu_event_init()
819 return -EINVAL; in arm_ccn_pmu_event_init()
824 dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", in arm_ccn_pmu_event_init()
826 return -EINVAL; in arm_ccn_pmu_event_init()
829 /* Watchpoint-based event for a node is actually set on XP */ in arm_ccn_pmu_event_init()
837 arm_ccn_pmu_config_set(&event->attr.config, in arm_ccn_pmu_event_init()
844 * periodically read when a hrtimer aka cpu-clock leader triggers). in arm_ccn_pmu_event_init()
846 if (event->group_leader->pmu != event->pmu && in arm_ccn_pmu_event_init()
847 !is_software_event(event->group_leader)) in arm_ccn_pmu_event_init()
848 return -EINVAL; in arm_ccn_pmu_event_init()
850 for_each_sibling_event(sibling, event->group_leader) { in arm_ccn_pmu_event_init()
851 if (sibling->pmu != event->pmu && in arm_ccn_pmu_event_init()
853 return -EINVAL; in arm_ccn_pmu_event_init()
865 res = readq(ccn->dt.base + CCN_DT_PMCCNTR); in arm_ccn_pmu_read_counter()
868 writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ); in arm_ccn_pmu_read_counter()
869 while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1)) in arm_ccn_pmu_read_counter()
871 writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); in arm_ccn_pmu_read_counter()
872 res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff; in arm_ccn_pmu_read_counter()
874 res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR); in arm_ccn_pmu_read_counter()
877 res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx)); in arm_ccn_pmu_read_counter()
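
The cycle counter is wider than 32 bits, so it cannot be read in one bus access; the driver instead requests a snapshot (PMSR_REQ), polls PMSR, clears it, then reads the two halves of PMCCNTRSR. The match at line 872 grabs bits [39:32] and line 874 ORs in the low word; presumably a res <<= 32 sits between them (line 873 is not matched here). A standalone sketch of the composition:

#include <stdint.h>
#include <assert.h>

/* hi register carries counter bits [39:32], lo carries [31:0] */
static uint64_t compose40(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)(hi & 0xff) << 32) | lo;
}

int main(void)
{
	assert(compose40(0xff, 0xdeadbeef) == 0xffdeadbeefULL);
	return 0;
}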
885 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_update()
886 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_update()
890 prev_count = local64_read(&hw->prev_count); in arm_ccn_pmu_event_update()
891 new_count = arm_ccn_pmu_read_counter(ccn, hw->idx); in arm_ccn_pmu_event_update()
892 } while (local64_xchg(&hw->prev_count, new_count) != prev_count); in arm_ccn_pmu_event_update()
894 mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1; in arm_ccn_pmu_event_update()
896 local64_add((new_count - prev_count) & mask, &event->count); in arm_ccn_pmu_event_update()
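
The do/while above retries until prev_count is swapped atomically, and the masked subtraction makes the delta immune to counter wraparound: the cycle counter is 40 bits wide, the event counters 32. A standalone check of the wraparound math:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t mask = (1ULL << 32) - 1;	/* event counter width */
	uint64_t prev = 0xfffffffe, now = 0x1;	/* counter wrapped */

	/* unsigned subtraction plus mask yields the true delta: 3 */
	assert(((now - prev) & mask) == 3);
	return 0;
}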
901 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_xp_dt_config()
902 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_xp_dt_config()
907 if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) in arm_ccn_pmu_xp_dt_config()
910 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) in arm_ccn_pmu_xp_dt_config()
911 xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; in arm_ccn_pmu_xp_dt_config()
913 xp = &ccn->xp[arm_ccn_node_to_xp( in arm_ccn_pmu_xp_dt_config()
914 CCN_CONFIG_NODE(event->attr.config))]; in arm_ccn_pmu_xp_dt_config()
917 dt_cfg = hw->event_base; in arm_ccn_pmu_xp_dt_config()
921 spin_lock(&ccn->dt.config_lock); in arm_ccn_pmu_xp_dt_config()
923 val = readl(xp->base + CCN_XP_DT_CONFIG); in arm_ccn_pmu_xp_dt_config()
925 CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx)); in arm_ccn_pmu_xp_dt_config()
926 val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx); in arm_ccn_pmu_xp_dt_config()
927 writel(val, xp->base + CCN_XP_DT_CONFIG); in arm_ccn_pmu_xp_dt_config()
929 spin_unlock(&ccn->dt.config_lock); in arm_ccn_pmu_xp_dt_config()
934 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_start()
935 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_start()
937 local64_set(&event->hw.prev_count, in arm_ccn_pmu_event_start()
938 arm_ccn_pmu_read_counter(ccn, hw->idx)); in arm_ccn_pmu_event_start()
939 hw->state = 0; in arm_ccn_pmu_event_start()
941 /* Set the DT bus input, engaging the counter */ in arm_ccn_pmu_event_start()
947 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_stop()
949 /* Disable counting, setting the DT bus to pass-through mode */ in arm_ccn_pmu_event_stop()
955 hw->state |= PERF_HES_STOPPED; in arm_ccn_pmu_event_stop()
960 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_xp_watchpoint_config()
961 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_xp_watchpoint_config()
963 ccn->dt.pmu_counters[hw->idx].source; in arm_ccn_pmu_xp_watchpoint_config()
964 unsigned long wp = hw->config_base; in arm_ccn_pmu_xp_watchpoint_config()
966 u64 cmp_l = event->attr.config1; in arm_ccn_pmu_xp_watchpoint_config()
967 u64 cmp_h = event->attr.config2; in arm_ccn_pmu_xp_watchpoint_config()
968 u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; in arm_ccn_pmu_xp_watchpoint_config()
969 u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; in arm_ccn_pmu_xp_watchpoint_config()
971 hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp); in arm_ccn_pmu_xp_watchpoint_config()
974 val = readl(source->base + CCN_XP_DT_INTERFACE_SEL); in arm_ccn_pmu_xp_watchpoint_config()
977 val |= CCN_CONFIG_DIR(event->attr.config) << in arm_ccn_pmu_xp_watchpoint_config()
981 val |= CCN_CONFIG_PORT(event->attr.config) << in arm_ccn_pmu_xp_watchpoint_config()
985 val |= CCN_CONFIG_VC(event->attr.config) << in arm_ccn_pmu_xp_watchpoint_config()
987 writel(val, source->base + CCN_XP_DT_INTERFACE_SEL); in arm_ccn_pmu_xp_watchpoint_config()
990 writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); in arm_ccn_pmu_xp_watchpoint_config()
992 source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); in arm_ccn_pmu_xp_watchpoint_config()
993 writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); in arm_ccn_pmu_xp_watchpoint_config()
995 source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4); in arm_ccn_pmu_xp_watchpoint_config()
998 writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); in arm_ccn_pmu_xp_watchpoint_config()
1000 source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); in arm_ccn_pmu_xp_watchpoint_config()
1001 writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); in arm_ccn_pmu_xp_watchpoint_config()
1003 source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4); in arm_ccn_pmu_xp_watchpoint_config()
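
Both the 64-bit compare value and the 64-bit mask are programmed as pairs of 32-bit writes, low word at the register and high word at +4, mirroring the split reads elsewhere in the driver. A sketch of the pattern as a helper (ccn_writeq_split() is hypothetical; the driver open-codes the pairs above):

/* Hypothetical helper: split a 64-bit value over two 32-bit registers */
static void ccn_writeq_split(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);		/* bits [31:0] */
	writel(val >> 32, reg + 4);		/* bits [63:32] */
}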
1008 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_xp_event_config()
1009 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_xp_event_config()
1011 ccn->dt.pmu_counters[hw->idx].source; in arm_ccn_pmu_xp_event_config()
1014 hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); in arm_ccn_pmu_xp_event_config()
1016 id = (CCN_CONFIG_VC(event->attr.config) << 4) | in arm_ccn_pmu_xp_event_config()
1017 (CCN_CONFIG_BUS(event->attr.config) << 3) | in arm_ccn_pmu_xp_event_config()
1018 (CCN_CONFIG_EVENT(event->attr.config) << 0); in arm_ccn_pmu_xp_event_config()
1020 val = readl(source->base + CCN_XP_PMU_EVENT_SEL); in arm_ccn_pmu_xp_event_config()
1022 CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); in arm_ccn_pmu_xp_event_config()
1023 val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); in arm_ccn_pmu_xp_event_config()
1024 writel(val, source->base + CCN_XP_PMU_EVENT_SEL); in arm_ccn_pmu_xp_event_config()
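
The XP's event mux takes a small packed ID: the event number in bits [2:0], the bus selector in bit 3 and the virtual channel in bits [6:4], shifted into the per-counter field of CCN_XP_PMU_EVENT_SEL. A standalone check of the packing:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t vc = 1, bus = 0, event = 0x4;

	/* (1 << 4) | (0 << 3) | 0x4 == 0x14 */
	assert(((vc << 4) | (bus << 3) | (event << 0)) == 0x14);
	return 0;
}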
1029 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_node_event_config()
1030 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_node_event_config()
1032 ccn->dt.pmu_counters[hw->idx].source; in arm_ccn_pmu_node_event_config()
1033 u32 type = CCN_CONFIG_TYPE(event->attr.config); in arm_ccn_pmu_node_event_config()
1036 port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); in arm_ccn_pmu_node_event_config()
1037 hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port, in arm_ccn_pmu_node_event_config()
1038 hw->config_base); in arm_ccn_pmu_node_event_config()
1055 /* Set the event id for the pre-allocated counter */ in arm_ccn_pmu_node_event_config()
1056 val = readl(source->base + CCN_HNF_PMU_EVENT_SEL); in arm_ccn_pmu_node_event_config()
1058 CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); in arm_ccn_pmu_node_event_config()
1059 val |= CCN_CONFIG_EVENT(event->attr.config) << in arm_ccn_pmu_node_event_config()
1060 CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); in arm_ccn_pmu_node_event_config()
1061 writel(val, source->base + CCN_HNF_PMU_EVENT_SEL); in arm_ccn_pmu_node_event_config()
1066 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_config()
1067 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_config()
1071 if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) in arm_ccn_pmu_event_config()
1074 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) in arm_ccn_pmu_event_config()
1075 xp = CCN_CONFIG_XP(event->attr.config); in arm_ccn_pmu_event_config()
1077 xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); in arm_ccn_pmu_event_config()
1079 spin_lock(&ccn->dt.config_lock); in arm_ccn_pmu_event_config()
1081 /* Set the DT bus "distance" register */ in arm_ccn_pmu_event_config()
1082 offset = (hw->idx / 4) * 4; in arm_ccn_pmu_event_config()
1083 val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); in arm_ccn_pmu_event_config()
1085 CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4)); in arm_ccn_pmu_event_config()
1086 val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4); in arm_ccn_pmu_event_config()
1087 writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); in arm_ccn_pmu_event_config()
1089 if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { in arm_ccn_pmu_event_config()
1090 if (CCN_CONFIG_EVENT(event->attr.config) == in arm_ccn_pmu_event_config()
1099 spin_unlock(&ccn->dt.config_lock); in arm_ccn_pmu_event_config()
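
CCN_DT_ACTIVE_DSM packs one XP ("distance") field per counter, four to a 32-bit register, so the counter index splits into a register offset (idx / 4 * 4 bytes) and a field slot (idx % 4). A standalone check of that arithmetic, assuming the 8-bit fields the /4 and %4 split implies:

#include <assert.h>

int main(void)
{
	int idx = 5;	/* PMU counter 5 */

	assert((idx / 4) * 4 == 4);	/* second ACTIVE_DSM register */
	assert(idx % 4 == 1);		/* field 1, i.e. bits [15:8] */
	return 0;
}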
1104 return bitmap_weight(ccn->dt.pmu_counters_mask, in arm_ccn_pmu_active_counters()
1111 struct hw_perf_event *hw = &event->hw; in arm_ccn_pmu_event_add()
1112 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_add()
1120 * event->cpu (this is the same one as presented in "cpumask" in arm_ccn_pmu_event_add()
1123 if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1) in arm_ccn_pmu_event_add()
1124 hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), in arm_ccn_pmu_event_add()
1129 hw->state = PERF_HES_STOPPED; in arm_ccn_pmu_event_add()
1139 struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); in arm_ccn_pmu_event_del()
1145 if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0) in arm_ccn_pmu_event_del()
1146 hrtimer_cancel(&ccn->dt.hrtimer); in arm_ccn_pmu_event_del()
1158 u32 val = readl(ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_enable()
1160 writel(val, ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_enable()
1167 u32 val = readl(ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_disable()
1169 writel(val, ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_disable()
1172 static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) in arm_ccn_pmu_overflow_handler() argument
1174 u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); in arm_ccn_pmu_overflow_handler()
1180 writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR); in arm_ccn_pmu_overflow_handler()
1185 struct perf_event *event = dt->pmu_counters[idx].event; in arm_ccn_pmu_overflow_handler()
1202 struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt, in arm_ccn_pmu_timer_handler() local
1207 arm_ccn_pmu_overflow_handler(dt); in arm_ccn_pmu_timer_handler()
1215 static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) in arm_ccn_pmu_offline_cpu() argument
1217 struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node); in arm_ccn_pmu_offline_cpu() local
1218 struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); in arm_ccn_pmu_offline_cpu()
1221 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) in arm_ccn_pmu_offline_cpu()
1226 perf_pmu_migrate_context(&dt->pmu, cpu, target); in arm_ccn_pmu_offline_cpu()
1227 cpumask_set_cpu(target, &dt->cpu); in arm_ccn_pmu_offline_cpu()
1228 if (ccn->irq) in arm_ccn_pmu_offline_cpu()
1229 WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); in arm_ccn_pmu_offline_cpu()
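
When the CPU owning the PMU context goes down, the context and the IRQ affinity follow it to a surviving CPU. The lines choosing that target fall between the matches at 1221 and 1226; a sketch consistent with the code shown, using the usual hotplug idiom (treat the exact call as an assumption):

	/* Sketch of the elided target selection: any other online CPU */
	unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids)
		return 0;	/* no CPU left to migrate to */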
1241 /* Initialize DT subsystem */ in arm_ccn_pmu_init()
1242 ccn->dt.base = ccn->base + CCN_REGION_SIZE; in arm_ccn_pmu_init()
1243 spin_lock_init(&ccn->dt.config_lock); in arm_ccn_pmu_init()
1244 writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR); in arm_ccn_pmu_init()
1245 writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); in arm_ccn_pmu_init()
1247 ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_init()
1248 writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); in arm_ccn_pmu_init()
1249 for (i = 0; i < ccn->num_xps; i++) { in arm_ccn_pmu_init()
1250 writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG); in arm_ccn_pmu_init()
1256 ccn->xp[i].base + CCN_XP_DT_CONTROL); in arm_ccn_pmu_init()
1258 ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; in arm_ccn_pmu_init()
1259 ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0; in arm_ccn_pmu_init()
1260 ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; in arm_ccn_pmu_init()
1261 ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0; in arm_ccn_pmu_init()
1262 ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; in arm_ccn_pmu_init()
1263 ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15); in arm_ccn_pmu_init()
1264 ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; in arm_ccn_pmu_init()
1265 ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); in arm_ccn_pmu_init()
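
These defaults spell out the mask semantics: a set bit in cmp_mask means "don't care". ANY masks every flit bit (matches all traffic), EXACT masks none, ORDER leaves only bit 15 of the high word visible, and OPCODE leaves only the 5-bit field at bits [13:9]. An illustrative match test under that reading (how the hardware applies the mask is a TRM detail, so treat the predicate as an assumption):

#include <stdint.h>
#include <assert.h>

/* Assumed semantics: only bits left clear in mask must compare equal */
static int wp_match(uint64_t flit, uint64_t cmp, uint64_t mask)
{
	return ((flit ^ cmp) & ~mask) == 0;
}

int main(void)
{
	uint64_t opcode_mask = ~(uint64_t)(0x1f << 9);	/* MASK_OPCODE .h */

	/* Same opcode bits [13:9], everything else differs: still a match */
	assert(wp_match(0xabcd0a00, 0x12340a00, opcode_mask));
	return 0;
}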
1268 ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); in arm_ccn_pmu_init()
1269 if (ccn->dt.id == 0) { in arm_ccn_pmu_init()
1272 name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d", in arm_ccn_pmu_init()
1273 ccn->dt.id); in arm_ccn_pmu_init()
1275 err = -ENOMEM; in arm_ccn_pmu_init()
1281 ccn->dt.pmu = (struct pmu) { in arm_ccn_pmu_init()
1296 if (!ccn->irq) { in arm_ccn_pmu_init()
1297 dev_info(ccn->dev, "No access to interrupts, using timer.\n"); in arm_ccn_pmu_init()
1298 hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, in arm_ccn_pmu_init()
1300 ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; in arm_ccn_pmu_init()
1304 cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); in arm_ccn_pmu_init()
1307 if (ccn->irq) { in arm_ccn_pmu_init()
1308 err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); in arm_ccn_pmu_init()
1310 dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); in arm_ccn_pmu_init()
1315 err = perf_pmu_register(&ccn->dt.pmu, name, -1); in arm_ccn_pmu_init()
1320 &ccn->dt.node); in arm_ccn_pmu_init()
1328 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); in arm_ccn_pmu_init()
1329 for (i = 0; i < ccn->num_xps; i++) in arm_ccn_pmu_init()
1330 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); in arm_ccn_pmu_init()
1331 writel(0, ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_init()
1340 &ccn->dt.node); in arm_ccn_pmu_cleanup()
1341 if (ccn->irq) in arm_ccn_pmu_cleanup()
1342 irq_set_affinity_hint(ccn->irq, NULL); in arm_ccn_pmu_cleanup()
1343 for (i = 0; i < ccn->num_xps; i++) in arm_ccn_pmu_cleanup()
1344 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); in arm_ccn_pmu_cleanup()
1345 writel(0, ccn->dt.base + CCN_DT_PMCR); in arm_ccn_pmu_cleanup()
1346 perf_pmu_unregister(&ccn->dt.pmu); in arm_ccn_pmu_cleanup()
1347 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); in arm_ccn_pmu_cleanup()
1361 val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 + in arm_ccn_for_each_valid_region()
1366 base = ccn->base + region * CCN_REGION_SIZE; in arm_ccn_for_each_valid_region()
1385 if (type == CCN_TYPE_XP && id >= ccn->num_xps) in arm_ccn_get_nodes_num()
1386 ccn->num_xps = id + 1; in arm_ccn_get_nodes_num()
1387 else if (id >= ccn->num_nodes) in arm_ccn_get_nodes_num()
1388 ccn->num_nodes = id + 1; in arm_ccn_get_nodes_num()
1398 dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type); in arm_ccn_init_nodes()
1402 ccn->mn_id = id; in arm_ccn_init_nodes()
1407 component = &ccn->xp[id]; in arm_ccn_init_nodes()
1410 ccn->sbsx_present = 1; in arm_ccn_init_nodes()
1411 component = &ccn->node[id]; in arm_ccn_init_nodes()
1414 ccn->sbas_present = 1; in arm_ccn_init_nodes()
1415 /* Fall-through */ in arm_ccn_init_nodes()
1417 component = &ccn->node[id]; in arm_ccn_init_nodes()
1421 component->base = base; in arm_ccn_init_nodes()
1422 component->type = type; in arm_ccn_init_nodes()
1432 dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n", in arm_ccn_error_handler()
1435 dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n"); in arm_ccn_error_handler()
1437 ccn->base + CCN_MN_ERRINT_STATUS); in arm_ccn_error_handler()
1452 err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0); in arm_ccn_irq_handler()
1455 res = arm_ccn_pmu_overflow_handler(&ccn->dt); in arm_ccn_irq_handler()
1460 err_sig_val[i] = readl(ccn->base + in arm_ccn_irq_handler()
1469 ccn->base + CCN_MN_ERRINT_STATUS); in arm_ccn_irq_handler()
1482 ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); in arm_ccn_probe()
1484 return -ENOMEM; in arm_ccn_probe()
1485 ccn->dev = &pdev->dev; in arm_ccn_probe()
1489 ccn->base = devm_ioremap_resource(ccn->dev, res); in arm_ccn_probe()
1490 if (IS_ERR(ccn->base)) in arm_ccn_probe()
1491 return PTR_ERR(ccn->base); in arm_ccn_probe()
1495 return -EINVAL; in arm_ccn_probe()
1496 irq = res->start; in arm_ccn_probe()
1500 ccn->base + CCN_MN_ERRINT_STATUS); in arm_ccn_probe()
1501 if (readl(ccn->base + CCN_MN_ERRINT_STATUS) & in arm_ccn_probe()
1505 ccn->base + CCN_MN_ERRINT_STATUS); in arm_ccn_probe()
1506 err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, in arm_ccn_probe()
1508 dev_name(ccn->dev), ccn); in arm_ccn_probe()
1512 ccn->irq = irq; in arm_ccn_probe()
1522 ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node), in arm_ccn_probe()
1524 ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node), in arm_ccn_probe()
1526 if (!ccn->node || !ccn->xp) in arm_ccn_probe()
1527 return -ENOMEM; in arm_ccn_probe()
1546 { .compatible = "arm,ccn-502", },
1547 { .compatible = "arm,ccn-504", },
1554 .name = "arm-ccn",