Lines matching "+full:ssc +full:- +full:range"

The matched lines below come from the Linux kernel's Pentium 4 / Netburst perf-events driver (arch/x86/events/intel/p4.c in current trees), grouped by the function they were found in.
 * For licensing details see kernel-base/COPYING
 * array indices: 0,1 - HT threads, used with HT enabled cpu
char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
 * Tags and friends -- they are left to a caller
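The cntr field above belongs to the driver's per-event binding descriptor. A minimal sketch of that structure, reconstructed from the field comment here and the driver source (the other field comments are paraphrased, so treat them as approximate):

	struct p4_event_bind {
		unsigned int opcode;		/* Event code and ESCR selector */
		unsigned int escr_msr[2];	/* ESCR MSR for this event */
		unsigned int escr_emask;	/* valid ESCR EventMask bits */
		unsigned int shared;		/* event is shared across threads */
		char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
	};

The .cntr rows that follow are initializers from the p4_event_bind_map table built of these descriptors: the first triple lists the counters usable when the event runs on HT thread 0, the second triple those for thread 1.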
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
	P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {2, -1, -1}, {3, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {1, -1, -1} },
.cntr = { {2, -1, -1}, {3, -1, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {4, 5, -1}, {6, 7, -1} },
.cntr = { {8, 9, -1}, {10, 11, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
.cntr = { {0, -1, -1}, {2, -1, -1} },
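Only one row above carries its event name: the SSC EventMask bit of P4_EVENT_MEMORY_COMPLETE, which is what this search matched. For orientation, the complete binding entry for that event plausibly reads as follows (assembled from the matched lines; the SAAT ESCR MSR names and the LSC companion bit are taken from mainline and should be verified against the tree at hand):

	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask	=
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},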
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
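These rows come from the driver's hw_cache_event_ids table; -1 flags a generic cache event/op/result combination the P4 PMU cannot count. The generic x86 core turns the marker into an error when the event is created, along these lines (a sketch of the check in set_ext_hw_attr() in arch/x86/events/core.c; surrounding code differs between versions):

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
	if (val == 0)
		return -ENOENT;		/* combination not populated */
	if (val == -1)
		return -EINVAL;		/* combination explicitly unsupported */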
 * utilize non-intersected resources (ESCR/CCCR/counter registers).
 * Non-halted cycles can be substituted with non-sleeping cycles (see
 * to run nmi-watchdog and 'perf top' (or any other user space tool
/* non-halted CPU clocks */

In p4_pmu_event_map():
	esel = P4_OPCODE_ESEL(bind->opcode);

In p4_event_match_cpu_model():
	 * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
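That IQ_ESCR remark sits inside a small CPU-model filter. A sketch of the whole function as it reads in mainline (worth re-checking against the exact kernel version):

	static int p4_event_match_cpu_model(unsigned int event_idx)
	{
		/* INSTR_COMPLETED event only exists on models 3, 4, 6 (Prescott) */
		if (event_idx == P4_EVENT_INSTR_COMPLETED) {
			if (boot_cpu_data.x86_model < 3)
				return 0;
		}

		/*
		 * For info
		 * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
		 */

		return 1;
	}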
In p4_validate_raw_event():
	/* User data may have out-of-bound event index */
	v = p4_config_unpack_event(event->attr.config);
	return -EINVAL;
	return -EINVAL;
	 * depends on logical cpu state -- count event if one cpu active,
	v = perf_allow_cpu(&event->attr);
	emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
	return -EINVAL;
	if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
	return -EINVAL;
	v = p4_config_unpack_metric(event->attr.config);
	return -EINVAL;
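Stitched together, these fragments cover the whole validation path. A reconstruction of its skeleton (the guard conditions are filled in from mainline and may drift between versions):

	static int p4_validate_raw_event(struct perf_event *event)
	{
		unsigned int v, emask;

		/* User data may have out-of-bound event index */
		v = p4_config_unpack_event(event->attr.config);
		if (v >= ARRAY_SIZE(p4_event_bind_map))
			return -EINVAL;

		/* it may be unsupported on this CPU model: */
		if (!p4_event_match_cpu_model(v))
			return -EINVAL;

		/*
		 * Counting of a shared event on an HT machine
		 * depends on logical cpu state -- count event if one cpu active,
		 * none, or both -- so it is gated by a permission check:
		 */
		if (p4_ht_active() && p4_event_bind_map[v].shared) {
			v = perf_allow_cpu(&event->attr);
			if (v)
				return v;
		}

		/* ESCR EventMask bits may be invalid */
		emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
		if (emask & ~p4_event_bind_map[v].escr_emask)
			return -EINVAL;

		/* it may have some invalid PEBS bits */
		if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
			return -EINVAL;

		v = p4_config_unpack_metric(event->attr.config);
		if (v >= ARRAY_SIZE(p4_pebs_bind_map))
			return -EINVAL;

		return 0;
	}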
In p4_hw_config():
	 * first time on the same cpu -- we will not need swap thread
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
				    event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
	event->hw.config = p4_set_ht_bit(event->hw.config);
	if (event->attr.type == PERF_TYPE_RAW) {
	event->attr.config &= P4_CONFIG_MASK;
	event->hw.config |= event->attr.config;
	bind = p4_config_get_bind(event->attr.config);
	rc = -EINVAL;
	esel = P4_OPCODE_ESEL(bind->opcode);
	event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
In p4_pmu_clear_cccr_ovf():
	rdmsrl(hwc->config_base, v);
	wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
	rdmsrl(hwc->event_base, v);
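The two reads serve different purposes: the CCCR read checks the official overflow flag, while the counter read catches overflows the hardware failed to flag. A sketch of the full helper (reconstructed; ARCH_P4_UNFLAGGED_BIT is the kernel's name for the counter's top bit):

	static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
	{
		u64 v;

		/* the official way of overflow indication */
		rdmsrl(hwc->config_base, v);
		if (v & P4_CCCR_OVF) {
			wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
			return 1;
		}

		/*
		 * The overflow may have raised an NMI without setting
		 * P4_CCCR_OVF. The counter holds a negative value while
		 * armed, so if its top bit has cleared it already crossed
		 * zero -- an unflagged overflow.
		 */
		rdmsrl(hwc->event_base, v);
		if (!(v & ARCH_P4_UNFLAGGED_BIT))
			return 1;

		return 0;
	}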
In p4_pmu_disable_pebs():
	 * What is more complex -- RAW events, if user (for some
	 * event opcode -- it's fine from hardware point of view
	 * So for the moment leave metrics turned on forever -- it's
In p4_pmu_disable_event():
	struct hw_perf_event *hwc = &event->hw;
	(void)wrmsrl_safe(hwc->config_base,
		p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
In p4_pmu_disable_all():
	struct perf_event *event = cpuc->events[idx];
	if (!test_bit(idx, cpuc->active_mask))
In p4_pmu_enable_pebs():
	(void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
	(void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
In p4_pmu_enable_event():
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	escr_addr = bind->escr_msr[thread];
	 * - we don't support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
	cccr = p4_config_unpack_cccr(hwc->config);
	p4_pmu_enable_pebs(hwc->config);
	(void)wrmsrl_safe(hwc->config_base,
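The truncated wrmsrl_safe() at the end is the second half of the arming sequence: the ESCR is written first, then the CCCR with the enable bit set. Roughly (a sketch; mask details per mainline):

	(void)wrmsrl_safe(escr_addr, escr_conf);
	(void)wrmsrl_safe(hwc->config_base,
			  (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);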
In p4_pmu_enable_all():
	struct perf_event *event = cpuc->events[idx];
	if (!test_bit(idx, cpuc->active_mask))
In p4_pmu_handle_irq():
	if (!test_bit(idx, cpuc->active_mask)) {
		/* catch in-flight IRQs */
		if (__test_and_clear_bit(idx, cpuc->running))
	event = cpuc->events[idx];
	hwc = &event->hw;
	WARN_ON_ONCE(hwc->idx != idx);
	if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
	perf_sample_data_init(&data, 0, hwc->last_period);
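The sign-bit test merits a worked example. A P4 counter is armed with the two's-complement of the sampling period and counts up toward zero; with the 40-bit counters here (cntval_bits = 40), a period of 1000 arms the counter at 2^40 - 1000 = 0xFFFFFFFC18, which has bit 39 (1ULL << 39) set. That bit stays set until the counter crosses zero and wraps to a small value. So "OVF flag clear and bit 39 still set" means this counter has not overflowed and the NMI belongs to some other counter, hence the skip.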
In p4_pmu_swap_config_ts():
	if (!p4_should_swap_ts(hwc->config, cpu))
	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);
	hwc->config = p4_config_pack_escr(escr);
	hwc->config |= p4_config_pack_cccr(cccr);
	hwc->config |= P4_CONFIG_HT;
	hwc->config = p4_config_pack_escr(escr);
	hwc->config |= p4_config_pack_cccr(cccr);
	hwc->config &= ~P4_CONFIG_HT;
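Between the unpack and the two repack sequences sits the actual thread swap, elided by the search. For the thread-0-to-thread-1 direction it looks roughly like this (a sketch; the else branch mirrors it with T1 and T0 exchanged):

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		/* ... repack escr/cccr and set P4_CONFIG_HT, as above ... */
	}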
 * every ESCR MSR address has its low byte in the range [0xa0, 0xe1]
#define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)

In p4_get_escr_idx():
	return -1;
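The -1 is the miss path of a direct-mapped lookup: the MSR address, rebased by P4_ESCR_MSR_IDX(), indexes a small table of known ESCRs. A sketch of the lookup (reconstructed; p4_escr_table is the driver's static map of valid ESCR addresses):

	static int p4_get_escr_idx(unsigned int addr)
	{
		unsigned int idx = P4_ESCR_MSR_IDX(addr);

		if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE || !p4_escr_table[idx])) {
			WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
			return -1;
		}

		return idx;
	}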
In p4_next_cntr():
	j = bind->cntr[thread][i];
	if (j != -1 && !test_bit(j, used_mask))
	return -1;
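These three lines are almost the whole function; assembled, it walks the per-thread counter list from the binding and returns the first counter not yet claimed (a reconstruction, signature per mainline):

	static int p4_next_cntr(int thread, unsigned long *used_mask,
				struct p4_event_bind *bind)
	{
		int i, j;

		for (i = 0; i < P4_CNTR_LIMIT; i++) {
			j = bind->cntr[thread][i];
			if (j != -1 && !test_bit(j, used_mask))
				return j;	/* first free counter for this thread */
		}

		return -1;	/* all candidate counters are taken */
	}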
In p4_pmu_schedule_events():
	for (i = 0, num = n; i < n; i++, num--) {
	hwc = &cpuc->event_list[i]->hw;
	bind = p4_config_get_bind(hwc->config);
	escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
	if (unlikely(escr_idx == -1))
	if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
		cntr_idx = hwc->idx;
		assign[i] = hwc->idx;
	if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
	config_alias = p4_get_alias_event(hwc->config);
	hwc->config = config_alias;
	 * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
	 * Solve this with a cheap hack, reset the idx back to -1 to
	 * perf wants to work, but P4 is special. :-(
	if (p4_should_swap_ts(hwc->config, cpu))
		hwc->idx = -1;
	return num ? -EINVAL : 0;
PMU_FORMAT_ATTR(cccr, "config:0-31" );
PMU_FORMAT_ATTR(escr, "config:32-62");
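These format attributes spell out the raw-config layout that the pack/unpack helpers above rely on: the CCCR image occupies config bits 0-31 and the ESCR image bits 32-62, with bit 63 used for the driver's HT flag (P4_CONFIG_HT). The helpers in the P4 perf header are essentially:

	#define p4_config_pack_escr(v)		(((u64)(v)) << 32)
	#define p4_config_pack_cccr(v)		(((u64)(v)) & 0xffffffffULL)
	#define p4_config_unpack_escr(v)	(((u64)(v)) >> 32)
	#define p4_config_unpack_cccr(v)	(((u64)(v)) & 0xffffffffULL)

Thanks to the format attributes, a raw event can be given from user space as, e.g., perf stat -e cpu/escr=...,cccr=.../ (values omitted here), which is then checked by p4_validate_raw_event() above.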
.max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
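ARCH_P4_CNTRVAL_BITS is 40 on Netburst, so max_period works out to 2^39 - 1 = 0x7fffffffff: the counter is armed with the negated period and must keep its top (40th) bit set until it genuinely overflows, which caps usable periods at half the counter range; the unflagged-overflow check above depends on that invariant.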
 * This handles erratum N15 in intel doc 249199-029,

In p4_pmu_init():
	/* If we get stripped -- indexing fails */
	return -ENODEV;