
Searched refs:cluster (Results 1 – 25 of 214) sorted by relevance


/kernel/linux/linux-5.10/kernel/sched/
core_ctl.c
60 struct cluster_data *cluster; member
69 #define for_each_cluster(cluster, idx) \ argument
70 for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\
81 static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
161 struct cluster_data *cluster; in show_global_state() local
168 cluster = c->cluster; in show_global_state()
169 if (!cluster || !cluster->inited) in show_global_state()
184 cluster->first_cpu); in show_global_state()
193 "\tNr running: %u\n", cluster->nrrun); in show_global_state()
195 "\tActive CPUs: %u\n", get_active_cpu_count(cluster)); in show_global_state()
[all …]
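for_each_cluster() above iterates an index over the global cluster_state[] array, binding `cluster` to each element; the hit is truncated before the loop's increment. A user-space sketch of the pattern (struct fields, array contents, and the restored `(idx)++` are assumptions):

    #include <stdbool.h>
    #include <stdio.h>

    struct cluster_data { unsigned int first_cpu; bool inited; };

    static struct cluster_data cluster_state[] = { { 0, true }, { 4, true } };
    static const unsigned int num_clusters = 2;

    /* Bind `cluster` to each slot of cluster_state[] in turn. */
    #define for_each_cluster(cluster, idx) \
        for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]); (idx)++)

    int main(void)
    {
        struct cluster_data *cluster;
        unsigned int idx = 0;

        for_each_cluster(cluster, idx)
            printf("cluster %u: first CPU %u\n", idx, cluster->first_cpu);
        return 0;
    }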
walt.c
405 struct sched_cluster *cluster = cpu_cluster(cpu); in update_cluster_load_subtractions() local
406 struct cpumask cluster_cpus = cluster->cpus; in update_cluster_load_subtractions()
411 raw_spin_lock(&cluster->load_lock); in update_cluster_load_subtractions()
432 raw_spin_unlock(&cluster->load_lock); in update_cluster_load_subtractions()
1326 insert_cluster(struct sched_cluster *cluster, struct list_head *head) in insert_cluster() argument
1332 if (cluster->max_power_cost < tmp->max_power_cost) in insert_cluster()
1337 list_add(&cluster->list, iter); in insert_cluster()
1342 struct sched_cluster *cluster = NULL; in alloc_new_cluster() local
1344 cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC); in alloc_new_cluster()
1345 if (!cluster) { in alloc_new_cluster()
[all …]
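insert_cluster() above keeps the scheduler's cluster list sorted by max_power_cost, and alloc_new_cluster() allocates entries with kzalloc(..., GFP_ATOMIC). A plain singly-linked-list analogue of the sorted insert (the kernel version uses list_head):

    #include <stdio.h>
    #include <stdlib.h>

    struct sched_cluster {
        unsigned int max_power_cost;
        struct sched_cluster *next;
    };

    /* Insert so the list stays ordered by ascending max_power_cost. */
    static void insert_cluster(struct sched_cluster **head, struct sched_cluster *c)
    {
        struct sched_cluster **p = head;

        while (*p && (*p)->max_power_cost <= c->max_power_cost)
            p = &(*p)->next;
        c->next = *p;
        *p = c;
    }

    int main(void)
    {
        struct sched_cluster a = { 300, NULL }, b = { 100, NULL }, c = { 200, NULL };
        struct sched_cluster *head = NULL, *it;

        insert_cluster(&head, &a);
        insert_cluster(&head, &b);
        insert_cluster(&head, &c);
        for (it = head; it; it = it->next)
            printf("max_power_cost=%u\n", it->max_power_cost);  /* 100 200 300 */
        return 0;
    }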
/kernel/linux/linux-5.10/arch/arm/common/
mcpm_entry.c
34 static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_going_down() argument
36 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
37 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
47 static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_down() argument
50 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
51 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
63 static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) in __mcpm_outbound_leave_critical() argument
66 mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
67 sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
82 static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) in __mcpm_outbound_enter_critical() argument
[all …]
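The hits above show MCPM's publish pattern: write a per-CPU state into the shared mcpm_sync structure, then push it to memory with sync_cache_w() so CPUs running with caches off can see it. A sketch of the layout and the write-then-sync step, with the cache sync stubbed out:

    #include <stdio.h>

    enum mcpm_cpu_state { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };

    #define MAX_CLUSTERS 2
    #define MAX_CPUS_PER_CLUSTER 4

    static struct {
        struct {
            struct { int cpu; } cpus[MAX_CPUS_PER_CLUSTER];
            int cluster;
        } clusters[MAX_CLUSTERS];
    } mcpm_sync;

    /* Stub: the kernel's sync_cache_w() cleans the written line to the
     * point of coherency so a cache-off observer reads the new value. */
    #define sync_cache_w(ptr) ((void)(ptr))

    static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
    {
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    }

    int main(void)
    {
        __mcpm_cpu_going_down(0, 1);
        printf("cpu 0, cluster 1 -> state %d\n", mcpm_sync.clusters[1].cpus[0].cpu);
        return 0;
    }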
bL_switcher.c
273 int cluster; in bL_switcher_thread() local
288 cluster = t->wanted_cluster; in bL_switcher_thread()
295 if (cluster != -1) { in bL_switcher_thread()
296 bL_switch_to(cluster); in bL_switcher_thread()
422 unsigned int cpu, cluster, mask; in bL_switcher_halve_cpus() local
429 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); in bL_switcher_halve_cpus()
430 if (cluster >= 2) { in bL_switcher_halve_cpus()
436 mask |= (1 << cluster); in bL_switcher_halve_cpus()
454 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); in bL_switcher_halve_cpus()
456 cluster_0 = cluster; in bL_switcher_halve_cpus()
[all …]
mcpm_head.S
54 ubfx r10, r0, #8, #8 @ r10 = cluster
86 mla r8, r0, r10, r8 @ r8 = sync cluster base
94 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
98 mla r11, r0, r10, r11 @ r11 = cluster first man lock
104 bne mcpm_setup_wait @ wait for cluster setup if so
107 cmp r0, #CLUSTER_UP @ cluster already up?
108 bne mcpm_setup @ if not, set up the cluster
118 @ Signal that the cluster is being brought up:
123 @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
126 @ Wait for any previously-pending cluster teardown operations to abort
[all …]
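Both the MPIDR_AFFINITY_LEVEL() calls in bL_switcher.c above and the `ubfx r10, r0, #8, #8` in mcpm_head.S extract the same 8-bit field: affinity level 1 of the MPIDR, i.e. the cluster number. A minimal sketch of the field layout (the macro body here is written from the arm32 convention of one byte per level; treat it as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* On 32-bit ARM each affinity level is an 8-bit field: level 0 (CPU
     * within the cluster) at bits [7:0], level 1 (cluster) at bits [15:8]. */
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) (((mpidr) >> ((level) * 8)) & 0xff)

    int main(void)
    {
        uint32_t mpidr = 0x0102;          /* cluster 1, CPU 2 */

        printf("cpu %u cluster %u\n",
               (unsigned int)MPIDR_AFFINITY_LEVEL(mpidr, 0),
               (unsigned int)MPIDR_AFFINITY_LEVEL(mpidr, 1));
        return 0;
    }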
/kernel/linux/linux-5.10/arch/arm/mach-sunxi/
mc_smp.c
88 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument
91 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15()
106 __func__, cluster, core); in sunxi_core_is_cortex_a15()
116 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument
122 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
126 cluster, cpu); in sunxi_cpu_power_switch_set()
130 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
132 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
134 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
136 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
[all …]
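sunxi_cpu_power_switch_set() above turns a CPU's power switch on in stages (0xff, then 0xfe, 0xf8, 0xf0) rather than in one write, ramping the rail gradually; the kernel inserts small delays between steps. A stubbed sketch of the sequencing — the register array, the delay, and the final 0x00 step are assumptions beyond the excerpt:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_switch[2][4];                /* stand-in register file */
    #define PRCM_PWR_SWITCH_REG(cluster, cpu) (&fake_switch[cluster][cpu])

    static void writel(uint32_t v, uint32_t *reg) { *reg = v; printf("write 0x%02x\n", v); }
    static void udelay(unsigned int us) { (void)us; } /* kernel busy-waits here */

    /* Close the power switch in steps rather than all at once. The final
     * 0x00 (switch fully closed) is assumed from the full driver. */
    static void power_switch_on(unsigned int cluster, unsigned int cpu)
    {
        static const uint32_t steps[] = { 0xff, 0xfe, 0xf8, 0xf0, 0x00 };
        unsigned int i;

        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
            writel(steps[i], PRCM_PWR_SWITCH_REG(cluster, cpu));
            udelay(10);
        }
    }

    int main(void) { power_switch_on(0, 1); return 0; }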
/kernel/linux/linux-5.10/arch/arm/mach-vexpress/
tc2_pm.c
48 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument
50 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup()
51 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup()
53 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup()
55 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup()
59 static int tc2_pm_cluster_powerup(unsigned int cluster) in tc2_pm_cluster_powerup() argument
61 pr_debug("%s: cluster %u\n", __func__, cluster); in tc2_pm_cluster_powerup()
62 if (cluster >= TC2_CLUSTERS) in tc2_pm_cluster_powerup()
64 ve_spc_powerdown(cluster, false); in tc2_pm_cluster_powerup()
68 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument
[all …]
dcscb.c
39 static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster) in dcscb_cpu_powerup() argument
43 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in dcscb_cpu_powerup()
44 if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster])) in dcscb_cpu_powerup()
47 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); in dcscb_cpu_powerup()
49 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); in dcscb_cpu_powerup()
53 static int dcscb_cluster_powerup(unsigned int cluster) in dcscb_cluster_powerup() argument
57 pr_debug("%s: cluster %u\n", __func__, cluster); in dcscb_cluster_powerup()
58 if (cluster >= 2) in dcscb_cluster_powerup()
62 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); in dcscb_cluster_powerup()
64 rst_hold |= dcscb_allcpus_mask[cluster]; in dcscb_cluster_powerup()
[all …]
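dcscb_cpu_powerup() above releases a CPU by read-modify-writing a per-cluster reset-hold register at RST_HOLD0 + cluster * 4. A stubbed user-space sketch of that pattern; the register contents and mask layout are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2];                          /* fake RST_HOLD0/RST_HOLD1 */
    #define RST_HOLD0 0
    static uint32_t readl_relaxed(const uint32_t *a) { return *a; }
    static void writel_relaxed(uint32_t v, uint32_t *a) { *a = v; }

    static const uint32_t dcscb_allcpus_mask[2] = { 0x0f, 0x03 };

    /* Clear the given CPU's reset-hold bits to let it run. */
    static int cpu_powerup(unsigned int cpu, unsigned int cluster)
    {
        uint32_t rst_hold, cpumask = 1u << cpu;

        if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
            return -1;                                /* -EINVAL in the kernel */

        rst_hold = readl_relaxed(&regs[RST_HOLD0 + cluster]);
        rst_hold &= ~(cpumask | (cpumask << 4));      /* assumed bit layout */
        writel_relaxed(rst_hold, &regs[RST_HOLD0 + cluster]);
        return 0;
    }

    int main(void)
    {
        regs[0] = 0xff;
        cpu_powerup(1, 0);
        printf("rst_hold0 = 0x%02x\n", regs[0]);
        return 0;
    }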
spc.c
119 static inline bool cluster_is_a15(u32 cluster) in cluster_is_a15() argument
121 return cluster == info->a15_clusid; in cluster_is_a15()
158 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) in ve_spc_cpu_wakeup_irq() argument
162 if (cluster >= MAX_CLUSTERS) in ve_spc_cpu_wakeup_irq()
167 if (!cluster_is_a15(cluster)) in ve_spc_cpu_wakeup_irq()
187 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) in ve_spc_set_resume_addr() argument
191 if (cluster >= MAX_CLUSTERS) in ve_spc_set_resume_addr()
194 if (cluster_is_a15(cluster)) in ve_spc_set_resume_addr()
212 void ve_spc_powerdown(u32 cluster, bool enable) in ve_spc_powerdown() argument
216 if (cluster >= MAX_CLUSTERS) in ve_spc_powerdown()
[all …]
/kernel/linux/linux-5.10/fs/ocfs2/cluster/
nodemanager.c
51 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_configured_node_map() local
53 BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); in o2nm_configured_node_map()
55 if (cluster == NULL) in o2nm_configured_node_map()
58 read_lock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
59 memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); in o2nm_configured_node_map()
60 read_unlock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
66 static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, in o2nm_node_ip_tree_lookup() argument
71 struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; in o2nm_node_ip_tree_lookup()
104 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_get_node_by_ip() local
106 if (cluster == NULL) in o2nm_get_node_by_ip()
[all …]
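o2nm_configured_node_map() above holds the cluster's reader lock only long enough to memcpy a snapshot of the node bitmap, so callers can then inspect a stable copy unlocked. A pthread-based analogue (the type and bitmap size are stand-ins):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define BITMAP_BYTES 32

    struct o2nm_cluster {
        pthread_rwlock_t cl_nodes_lock;
        unsigned char cl_nodes_bitmap[BITMAP_BYTES];
    };

    /* Copy the live bitmap out under the read lock; the caller then works
     * on the stable snapshot without holding the lock. */
    static void configured_node_map(struct o2nm_cluster *cluster,
                                    unsigned char *map, size_t bytes)
    {
        pthread_rwlock_rdlock(&cluster->cl_nodes_lock);
        memcpy(map, cluster->cl_nodes_bitmap, bytes);
        pthread_rwlock_unlock(&cluster->cl_nodes_lock);
    }

    int main(void)
    {
        struct o2nm_cluster c = { .cl_nodes_lock = PTHREAD_RWLOCK_INITIALIZER };
        unsigned char snap[BITMAP_BYTES];

        c.cl_nodes_bitmap[0] = 0x05;                  /* nodes 0 and 2 configured */
        configured_node_map(&c, snap, sizeof(snap));
        printf("snapshot[0] = 0x%02x\n", snap[0]);
        return 0;
    }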
/kernel/linux/linux-5.10/arch/arm/mach-exynos/
mcpm-exynos.c
59 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) in exynos_cpu_powerup() argument
61 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); in exynos_cpu_powerup()
64 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in exynos_cpu_powerup()
66 cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cpu_powerup()
78 if (cluster && in exynos_cpu_powerup()
79 cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { in exynos_cpu_powerup()
95 cpu, cluster); in exynos_cpu_powerup()
108 static int exynos_cluster_powerup(unsigned int cluster) in exynos_cluster_powerup() argument
110 pr_debug("%s: cluster %u\n", __func__, cluster); in exynos_cluster_powerup()
111 if (cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cluster_powerup()
[all …]
/kernel/linux/linux-5.10/arch/arm/mach-hisi/
platmcpm.c
71 static bool hip04_cluster_is_down(unsigned int cluster) in hip04_cluster_is_down() argument
76 if (hip04_cpu_table[cluster][i]) in hip04_cluster_is_down()
81 static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) in hip04_set_snoop_filter() argument
89 data |= 1 << cluster; in hip04_set_snoop_filter()
91 data &= ~(1 << cluster); in hip04_set_snoop_filter()
100 unsigned int mpidr, cpu, cluster; in hip04_boot_secondary() local
106 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in hip04_boot_secondary()
110 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_boot_secondary()
115 if (hip04_cpu_table[cluster][cpu]) in hip04_boot_secondary()
118 sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster); in hip04_boot_secondary()
[all …]
/kernel/linux/linux-5.10/drivers/perf/
qcom_l2_pmu.c
243 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
254 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
262 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
319 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
339 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
348 if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) in l2_cache_get_event_idx()
354 idx = find_first_zero_bit(cluster->used_counters, num_ctrs); in l2_cache_get_event_idx()
365 if (test_bit(group, cluster->used_groups)) in l2_cache_get_event_idx()
368 set_bit(idx, cluster->used_counters); in l2_cache_get_event_idx()
[all …]
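l2_cache_get_event_idx() above allocates counters from a bitmap: the dedicated cycle counter via test_and_set_bit(), general-purpose counters via find_first_zero_bit(). A single-threaded sketch of the same allocator (the kernel bit helpers are atomic; this word-sized bitmap is a stand-in):

    #include <stdio.h>

    #define NUM_CTRS 8
    static unsigned int used_counters;                /* bit n set => counter n busy */

    /* Return the first free counter index, or -1 if all are taken. */
    static int get_counter_idx(void)
    {
        int idx;

        for (idx = 0; idx < NUM_CTRS; idx++) {
            if (!(used_counters & (1u << idx))) {
                used_counters |= 1u << idx;           /* test-and-set in one step */
                return idx;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("%d %d\n", get_counter_idx(), get_counter_idx());  /* 0 1 */
        return 0;
    }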
/kernel/linux/linux-5.10/arch/arm/include/asm/
mcpm.h
44 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
51 void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
84 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
132 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
219 int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
220 int (*cluster_powerup)(unsigned int cluster);
221 void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
222 void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
223 void (*cluster_powerdown_prepare)(unsigned int cluster);
226 void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
[all …]
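mcpm.h above declares the vtable of power-management hooks that each backend in this list (tc2_pm.c, dcscb.c, mcpm-exynos.c, mc_smp.c) registers. A minimal sketch of the register-and-dispatch shape, keeping just two of the hooks:

    #include <stdio.h>

    struct mcpm_platform_ops {
        int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
        int (*cluster_powerup)(unsigned int cluster);
    };

    static const struct mcpm_platform_ops *platform_ops;

    static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
    {
        printf("powering up cpu %u of cluster %u\n", cpu, cluster);
        return 0;
    }

    static const struct mcpm_platform_ops my_ops = {
        .cpu_powerup = my_cpu_powerup,
    };

    /* The generic layer dispatches through the registered ops, if any. */
    static int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
    {
        if (!platform_ops || !platform_ops->cpu_powerup)
            return -1;                                /* error code in the kernel */
        return platform_ops->cpu_powerup(cpu, cluster);
    }

    int main(void)
    {
        platform_ops = &my_ops;
        return mcpm_cpu_power_up(0, 1);
    }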
/kernel/linux/linux-5.10/arch/alpha/mm/
numa.c
33 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
38 struct memclust_struct * cluster; in show_mem_layout() local
47 for_each_mem_cluster(memdesc, cluster, i) { in show_mem_layout()
49 i, cluster->usage, cluster->start_pfn, in show_mem_layout()
50 cluster->start_pfn + cluster->numpages); in show_mem_layout()
58 struct memclust_struct * cluster; in setup_memory_node() local
78 for_each_mem_cluster(memdesc, cluster, i) { in setup_memory_node()
82 if (cluster->usage & 3) in setup_memory_node()
85 start = cluster->start_pfn; in setup_memory_node()
86 end = start + cluster->numpages; in setup_memory_node()
[all …]
/kernel/linux/linux-5.10/Documentation/arm/
cluster-pm-race-avoidance.rst
6 cluster setup and teardown operations and to manage hardware coherency
29 cluster-level operations are only performed when it is truly safe to do
34 are not immediately enabled when a cluster powers up. Since enabling or
38 power-down and power-up at the cluster level.
48 Each cluster and CPU is assigned a state, as follows:
67 The CPU or cluster is not coherent, and is either powered off or
71 The CPU or cluster has committed to moving to the UP state.
76 The CPU or cluster is active and coherent at the hardware
81 The CPU or cluster has committed to moving to the DOWN
89 Each cluster is also assigned a state, but it is necessary to split the
[all …]
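The document above assigns each CPU one of four states and gives each cluster a state value split in two parts. A sketch of the state sets as C enums; the CPU state names match the mcpm_entry.c hits above, while the INBOUND_* names follow the full kernel document and are an assumption here since the excerpt is cut off:

    #include <stdio.h>

    /* Per-CPU states, cycling DOWN -> COMING_UP -> UP -> GOING_DOWN -> DOWN. */
    enum cpu_state { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };

    /* Cluster state proper: is the cluster's shared hardware usable? */
    enum cluster_state { CLUSTER_DOWN, CLUSTER_UP, CLUSTER_GOING_DOWN };

    /* "Inbound" half: is some CPU currently trying to bring the cluster up?
     * A first man entering a GOING_DOWN cluster uses this to make the
     * teardown abort (compare mcpm_head.S above). */
    enum inbound_state { INBOUND_NOT_COMING_UP, INBOUND_COMING_UP };

    int main(void)
    {
        enum cpu_state s = CPU_DOWN;

        s = CPU_COMING_UP;                            /* commit to powering up */
        s = CPU_UP;                                   /* coherent and running */
        printf("cpu state: %d (CPU_UP)\n", s);
        return 0;
    }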
/kernel/linux/linux-5.10/drivers/remoteproc/
ti_k3_r5_remoteproc.c
129 struct k3_r5_cluster *cluster; member
249 static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) in k3_r5_lockstep_reset() argument
255 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
266 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
279 list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
284 core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_reset()
286 list_for_each_entry_from_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
294 static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) in k3_r5_lockstep_release() argument
300 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
312 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
[all …]
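k3_r5_lockstep_reset() above applies a reset to every core on the cluster's list and, if one fails, walks the already-processed cores in reverse to undo them (list_for_each_entry_continue_reverse). An array-based sketch of that unwind idiom (the kernel version works on a linked list and real reset lines):

    #include <stdio.h>

    #define NUM_CORES 2
    static int core_in_reset[NUM_CORES];

    static int assert_reset(int core)
    {
        core_in_reset[core] = 1;
        return 0;                 /* make this nonzero to exercise the unwind */
    }

    static void deassert_reset(int core) { core_in_reset[core] = 0; }

    /* Put every core into reset, or leave none in reset on failure. */
    static int lockstep_reset(void)
    {
        int i, ret = 0;

        for (i = 0; i < NUM_CORES; i++) {
            ret = assert_reset(i);
            if (ret)
                goto unroll;
        }
        return 0;

    unroll:
        while (--i >= 0)          /* undo in reverse, skipping the failed core */
            deassert_reset(i);
        return ret;
    }

    int main(void)
    {
        printf("lockstep_reset -> %d\n", lockstep_reset());
        return 0;
    }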
/kernel/linux/linux-5.10/drivers/cpufreq/
vexpress-spc-cpufreq.c
47 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) argument
48 #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) argument
74 static unsigned int find_cluster_maxfreq(int cluster) in find_cluster_maxfreq() argument
82 if (cluster == per_cpu(physical_cluster, j) && in find_cluster_maxfreq()
285 u32 cluster = raw_cpu_to_cluster(cpu_dev->id); in _put_cluster_clk_and_freq_table() local
287 if (!freq_table[cluster]) in _put_cluster_clk_and_freq_table()
290 clk_put(clk[cluster]); in _put_cluster_clk_and_freq_table()
291 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); in _put_cluster_clk_and_freq_table()
297 u32 cluster = cpu_to_cluster(cpu_dev->id); in put_cluster_clk_and_freq_table() local
300 if (atomic_dec_return(&cluster_usage[cluster])) in put_cluster_clk_and_freq_table()
[all …]
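The ACTUAL_FREQ()/VIRT_FREQ() macros above double an A7 frequency on the way to the hardware and halve it on the way back, so cpufreq only ever sees the virtualized value. A quick round-trip check (A7_CLUSTER's value here is a stand-in; the macro bodies add parentheses but otherwise match the ones above):

    #include <stdio.h>

    #define A7_CLUSTER 1
    #define ACTUAL_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? (freq) << 1 : (freq))
    #define VIRT_FREQ(cluster, freq)   (((cluster) == A7_CLUSTER) ? (freq) >> 1 : (freq))

    int main(void)
    {
        unsigned int virt = 500000;                   /* kHz, as cpufreq sees it */
        unsigned int actual = ACTUAL_FREQ(A7_CLUSTER, virt);

        printf("virt %u -> actual %u -> virt %u\n",
               virt, actual, VIRT_FREQ(A7_CLUSTER, actual));
        return 0;
    }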
tegra186-cpufreq.c
62 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_init() local
64 cluster->info; in tegra186_cpufreq_init()
76 policy->freq_table = cluster->table; in tegra186_cpufreq_init()
113 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_get() local
116 for (core = 0; core < ARRAY_SIZE(cluster->info->cpus); core++) { in tegra186_cpufreq_get()
117 if (cluster->info->cpus[core] != policy->cpu) in tegra186_cpufreq_get()
120 freq = (cluster->ref_clk_khz * ndiv) / cluster->div; in tegra186_cpufreq_get()
144 struct tegra186_cpufreq_cluster *cluster) in init_vhint_table() argument
163 req.cluster_id = cluster->info->bpmp_cluster_id; in init_vhint_table()
196 cluster->ref_clk_khz = data->ref_clk_hz / 1000; in init_vhint_table()
[all …]
/kernel/linux/linux-5.10/Documentation/ABI/testing/
sysfs-ocfs2
14 covers how ocfs2 uses distributed locking between cluster
18 cluster nodes can interoperate if they have an identical
34 the available plugins to support ocfs2 cluster operation.
35 A cluster plugin is required to use ocfs2 in a cluster.
38 * 'o2cb' - The classic o2cb cluster stack that ocfs2 has
40 * 'user' - A plugin supporting userspace cluster software
54 cluster plugin is currently in use by the filesystem.
62 the cluster stack in use. The contents may change
63 when all filesystems are unmounted and the cluster stack
71 of current ocfs2 cluster stack. This value is set by
[all …]
/kernel/linux/linux-5.10/arch/mips/include/asm/
mips-cps.h
133 static inline uint64_t mips_cps_cluster_config(unsigned int cluster) in mips_cps_cluster_config() argument
143 WARN_ON(cluster != 0); in mips_cps_cluster_config()
151 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in mips_cps_cluster_config()
166 static inline unsigned int mips_cps_numcores(unsigned int cluster) in mips_cps_numcores() argument
172 return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; in mips_cps_numcores()
182 static inline unsigned int mips_cps_numiocu(unsigned int cluster) in mips_cps_numiocu() argument
189 num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU; in mips_cps_numiocu()
203 static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core) in mips_cps_numvps() argument
214 mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in mips_cps_numvps()
/kernel/linux/linux-5.10/include/soc/nps/
common.h
77 u32 __reserved:20, cluster:4, core:4, thread:4; member
79 u32 __reserved:24, cluster:4, core:4;
106 static inline int nps_cluster_logic_to_phys(int cluster) in nps_cluster_logic_to_phys() argument
114 : "+r"(cluster) in nps_cluster_logic_to_phys()
120 return cluster; in nps_cluster_logic_to_phys()
125 nps_cluster_logic_to_phys(gid.cluster); })
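The global-ID layout above packs thread, core, and cluster into one 32-bit word using C bitfields, in two variants (with and without a thread field). A sketch of the same idea; note that bitfield allocation order is implementation-defined, which is why real headers pair such declarations with endianness #ifdefs:

    #include <stdint.h>
    #include <stdio.h>

    union global_id {
        struct {
            /* Low-to-high allocation assumed (typical little-endian ABI). */
            uint32_t thread:4, core:4, cluster:4, __reserved:20;
        };
        uint32_t value;
    };

    int main(void)
    {
        union global_id gid = { .value = 0 };

        gid.cluster = 3;
        gid.core = 2;
        gid.thread = 1;
        printf("value = 0x%08x\n", gid.value);        /* 0x00000321 on most ABIs */
        return 0;
    }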
/kernel/linux/linux-5.10/fs/fat/
cache.c
225 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) in fat_get_cluster() argument
244 if (cluster == 0) in fat_get_cluster()
247 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { in fat_get_cluster()
256 while (*fclus < cluster) { in fat_get_cluster()
291 static int fat_bmap_cluster(struct inode *inode, int cluster) in fat_bmap_cluster() argument
299 ret = fat_get_cluster(inode, cluster, &fclus, &dclus); in fat_bmap_cluster()
316 int cluster, offset; in fat_get_mapped_cluster() local
318 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_get_mapped_cluster()
320 cluster = fat_bmap_cluster(inode, cluster); in fat_get_mapped_cluster()
321 if (cluster < 0) in fat_get_mapped_cluster()
[all …]
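fat_get_mapped_cluster() above turns a sector number into a file-relative cluster index with a single shift, since cluster_bits - s_blocksize_bits is log2 of the sectors per cluster. A worked example with assumed geometry (4 KiB clusters, 512-byte sectors):

    #include <stdio.h>

    int main(void)
    {
        unsigned int cluster_bits = 12;               /* 4 KiB clusters */
        unsigned int blocksize_bits = 9;              /* 512-byte sectors */
        unsigned long long sector = 35;

        /* 2^(12-9) = 8 sectors per cluster. */
        unsigned int shift = cluster_bits - blocksize_bits;
        unsigned long long cluster = sector >> shift;                /* 4 */
        unsigned long long offset = sector & ((1ull << shift) - 1);  /* 3 */

        printf("sector %llu -> cluster %llu, offset %llu\n",
               sector, cluster, offset);
        return 0;
    }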
/kernel/linux/linux-5.10/drivers/clocksource/
timer-nps.c
82 int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; in nps_clksrc_read() local
84 return (u64)ioread32be(nps_msu_reg_low_addr[cluster]); in nps_clksrc_read()
89 int ret, cluster; in nps_setup_clocksource() local
94 for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++) in nps_setup_clocksource()
95 nps_msu_reg_low_addr[cluster] = in nps_setup_clocksource()
96 nps_host_reg((cluster << NPS_CLUSTER_OFFSET), in nps_setup_clocksource()
/kernel/linux/linux-5.10/arch/arm/mach-milbeaut/
platsmp.c
25 unsigned int mpidr, cpu, cluster; in m10v_boot_secondary() local
32 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in m10v_boot_secondary()
38 __func__, cpu, l_cpu, cluster); in m10v_boot_secondary()
48 unsigned int mpidr, cpu, cluster; in m10v_smp_init() local
61 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in m10v_smp_init()
62 pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster); in m10v_smp_init()
