Lines matching full:flexible
46 unsigned int flexible; /* XXX: placeholder, see fetch_this_slot() */ member
65 unsigned int flexible; member
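The two hits above sit in two different structures: a per-CPU accounting record and an aggregate of busy slots. A minimal sketch of how those two containers plausibly look, assuming the names struct bp_cpuinfo and struct bp_busy_slots and the neighbouring pinned fields (none of which are shown by the matches themselves):

    /* Per-CPU breakpoint accounting, as suggested by the hit at line 46. */
    struct bp_cpuinfo {
            unsigned int    cpu_pinned;     /* pinned CPU breakpoints on this CPU (assumed) */
            unsigned int    *tsk_pinned;    /* histogram of pinned task breakpoints (assumed) */
            unsigned int    flexible;       /* XXX: placeholder, see fetch_this_slot() */
    };

    /* Worst-case usage summary, as suggested by the hit at line 65. */
    struct bp_busy_slots {
            unsigned int    pinned;         /* worst-case pinned usage over the relevant CPUs */
            unsigned int    flexible;       /* worst-case flexible usage over the relevant CPUs */
    };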
152 nr = info->flexible; in fetch_bp_busy_slots()
153 if (nr > slots->flexible) in fetch_bp_busy_slots()
154 slots->flexible = nr; in fetch_bp_busy_slots()
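The three hits in fetch_bp_busy_slots() keep only the largest flexible count seen so far. A sketch of that max-accumulation over the CPUs the new breakpoint can run on; the surrounding loop, the cpumask argument and the get_bp_info() lookup are assumptions added for illustration:

    /* Fold each CPU's flexible count into the summary, keeping the maximum. */
    static void fetch_flexible_slots(struct bp_busy_slots *slots,
                                     const struct cpumask *cpumask)
    {
            int cpu;

            for_each_cpu(cpu, cpumask) {
                    const struct bp_cpuinfo *info = get_bp_info(cpu);  /* hypothetical lookup */
                    unsigned int nr = info->flexible;

                    if (nr > slots->flexible)
                            slots->flexible = nr;
            }
    }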
159 * For now, continue to consider flexible as pinned, until we can
160 * ensure no flexible event can ever be scheduled before a pinned event
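The comment clipped at the two hits above explains why flexible breakpoints are still accounted as pinned. A sketch of the helper it refers to (the name fetch_this_slot() comes from the placeholder comment at line 46), consistent with that policy: the weight of the breakpoint being reserved is charged to the pinned total rather than the flexible one.

    /*
     * Account the breakpoint being reserved. While flexible events are still
     * considered pinned (see the comment above), the weight goes to ->pinned.
     */
    static void fetch_this_slot(struct bp_busy_slots *slots, int weight)
    {
            slots->pinned += weight;
    }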
243 * (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
254 * (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
266 * ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
269 * -> Same checks as before. But now the info->flexible, if any, must keep
274 * ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
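The five hits above (file lines 243-274) are single lines clipped out of a larger comment that spells out the slot constraints for pinned and non-pinned breakpoints. A hedged reconstruction of the full expressions, with the clipped continuation terms and the HBP_NUM bound filled in from context; those continuations are assumptions, not part of the matches:

    /*
     * Pinned counter, attached to a single cpu:
     *   (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
     *        + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
     *
     * Pinned counter, attached to every cpu:
     *   (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
     *        + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
     *
     * Non-pinned (flexible) counter, attached to a single cpu:
     *   ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
     *        + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
     *
     *   -> Same checks as before. But now the info->flexible, if any, must keep
     *      at least one register, or the flexible events will never be fed.
     *
     * Non-pinned (flexible) counter, attached to every cpu:
     *   ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
     *        + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
     */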
303 /* Flexible counters need to keep at least one slot */ in __reserve_bp_slot()
304 if (slots.pinned + (!!slots.flexible) > nr_slots[type]) in __reserve_bp_slot()
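The two hits in __reserve_bp_slot() show the final admission test. A sketch of that decision: the pinned demand, plus one slot held back whenever any flexible counter exists, must still fit in the nr_slots[type] registers available for this breakpoint type. The function wrapper, the int type parameter and the -ENOSPC return are assumptions added for illustration:

    /* Reject the reservation if pinned usage plus the reserved flexible slot overflows. */
    static int check_bp_slots(const struct bp_busy_slots *slots, int type)
    {
            /* Flexible counters need to keep at least one slot. */
            if (slots->pinned + (slots->flexible ? 1 : 0) > nr_slots[type])
                    return -ENOSPC;

            return 0;
    }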