Searched full:cluster (Results 1 – 25 of 615) sorted by relevance

/kernel/linux/linux-5.10/kernel/sched/
core_ctl.c
60 struct cluster_data *cluster; member
69 #define for_each_cluster(cluster, idx) \ argument
70 for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\
81 static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
161 struct cluster_data *cluster; in show_global_state() local
168 cluster = c->cluster; in show_global_state()
169 if (!cluster || !cluster->inited) in show_global_state()
184 cluster->first_cpu); in show_global_state()
193 "\tNr running: %u\n", cluster->nrrun); in show_global_state()
195 "\tActive CPUs: %u\n", get_active_cpu_count(cluster)); in show_global_state()
[all …]
walt.c
405 struct sched_cluster *cluster = cpu_cluster(cpu); in update_cluster_load_subtractions() local
406 struct cpumask cluster_cpus = cluster->cpus; in update_cluster_load_subtractions()
411 raw_spin_lock(&cluster->load_lock); in update_cluster_load_subtractions()
432 raw_spin_unlock(&cluster->load_lock); in update_cluster_load_subtractions()
531 * even for intra cluster migrations. This is because, the aggregated in fixup_busy_time()
1326 insert_cluster(struct sched_cluster *cluster, struct list_head *head) in insert_cluster() argument
1332 if (cluster->max_power_cost < tmp->max_power_cost) in insert_cluster()
1337 list_add(&cluster->list, iter); in insert_cluster()
1342 struct sched_cluster *cluster = NULL; in alloc_new_cluster() local
1344 cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC); in alloc_new_cluster()
[all …]
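The insert_cluster() hit above keeps WALT's cluster list ordered by max_power_cost. Purely as an illustration of that insertion pattern (the type and function names below are invented for the sketch, not taken from walt.c), a standalone sorted insert looks like this:

    /* Standalone sketch (not kernel code) of the "keep clusters sorted by
     * max_power_cost" pattern seen in insert_cluster() above. */
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_cluster {
        int max_power_cost;
        struct demo_cluster *next;
    };

    /* Insert 'c' so the list stays ordered by ascending max_power_cost. */
    static void insert_sorted(struct demo_cluster **head, struct demo_cluster *c)
    {
        struct demo_cluster **pos = head;

        while (*pos && (*pos)->max_power_cost <= c->max_power_cost)
            pos = &(*pos)->next;
        c->next = *pos;
        *pos = c;
    }

    int main(void)
    {
        int costs[] = { 1024, 256, 512 };
        struct demo_cluster *head = NULL, *c;

        for (size_t i = 0; i < sizeof(costs) / sizeof(costs[0]); i++) {
            c = calloc(1, sizeof(*c));
            c->max_power_cost = costs[i];
            insert_sorted(&head, c);
        }
        for (c = head; c; c = c->next)
            printf("cluster with max_power_cost=%d\n", c->max_power_cost);
        return 0;
    }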
/kernel/linux/linux-5.10/arch/arm/common/
mcpm_entry.c
3 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
24 * see Documentation/arm/cluster-pm-race-avoidance.rst.
34 static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_going_down() argument
36 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
37 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
42 * cluster can be torn down without disrupting this CPU.
47 static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_down() argument
50 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
51 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
56 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
[all …]
mcpm_head.S
3 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
8 * Refer to Documentation/arm/cluster-pm-race-avoidance.rst
26 1903: .asciz " cluster"
54 ubfx r10, r0, #8, #8 @ r10 = cluster
86 mla r8, r0, r10, r8 @ r8 = sync cluster base
94 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
98 mla r11, r0, r10, r11 @ r11 = cluster first man lock
104 bne mcpm_setup_wait @ wait for cluster setup if so
107 cmp r0, #CLUSTER_UP @ cluster already up?
108 bne mcpm_setup @ if not, set up the cluster
[all …]
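The ubfx at line 54 extracts the cluster ID from bits [15:8] of the MPIDR, the same affinity field the spc.c and platmcpm.c hits below decode with MPIDR_AFFINITY_LEVEL(). For readers who prefer not to parse the assembly, here is a standalone C equivalent of that extraction (the helper name and example value are illustrative, not kernel identifiers):

    /* Standalone sketch: pulling the CPU and cluster affinity fields out of an
     * ARM MPIDR value, mirroring "ubfx r10, r0, #8, #8" above. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned int mpidr_level(uint32_t mpidr, unsigned int level)
    {
        return (mpidr >> (8 * level)) & 0xff;   /* each affinity level is 8 bits */
    }

    int main(void)
    {
        uint32_t mpidr = 0x00000102;            /* example: cluster 1, CPU 2 */

        printf("cpu %u cluster %u\n", mpidr_level(mpidr, 0), mpidr_level(mpidr, 1));
        return 0;
    }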
bL_switcher.c
3 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
118 * with the cluster number.
141 * bL_switch_to - Switch to a specific cluster for the current CPU
142 * @new_cluster_id: the ID of the cluster to switch to.
273 int cluster; in bL_switcher_thread() local
288 cluster = t->wanted_cluster; in bL_switcher_thread()
295 if (cluster != -1) { in bL_switcher_thread()
296 bL_switch_to(cluster); in bL_switcher_thread()
321 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
325 * @new_cluster_id: the ID of the cluster to switch to.
[all …]
/kernel/linux/linux-5.10/arch/arm/mach-sunxi/
mc_smp.c
11 * Cluster cache enable trampoline code adapted from MCPM framework
88 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument
91 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15()
103 * would be mid way in a core or cluster power sequence. in sunxi_core_is_cortex_a15()
105 pr_err("%s: Couldn't get CPU cluster %u core %u device node\n", in sunxi_core_is_cortex_a15()
106 __func__, cluster, core); in sunxi_core_is_cortex_a15()
116 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument
122 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
125 pr_debug("power clamp for cluster %u cpu %u already open\n", in sunxi_cpu_power_switch_set()
126 cluster, cpu); in sunxi_cpu_power_switch_set()
[all …]
/kernel/linux/linux-5.10/arch/arm/mach-vexpress/
tc2_pm.c
48 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerup() argument
50 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in tc2_pm_cpu_powerup()
51 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) in tc2_pm_cpu_powerup()
53 ve_spc_set_resume_addr(cluster, cpu, in tc2_pm_cpu_powerup()
55 ve_spc_cpu_wakeup_irq(cluster, cpu, true); in tc2_pm_cpu_powerup()
59 static int tc2_pm_cluster_powerup(unsigned int cluster) in tc2_pm_cluster_powerup() argument
61 pr_debug("%s: cluster %u\n", __func__, cluster); in tc2_pm_cluster_powerup()
62 if (cluster >= TC2_CLUSTERS) in tc2_pm_cluster_powerup()
64 ve_spc_powerdown(cluster, false); in tc2_pm_cluster_powerup()
68 static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) in tc2_pm_cpu_powerdown_prepare() argument
[all …]
dcscb.c
3 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
39 static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster) in dcscb_cpu_powerup() argument
43 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in dcscb_cpu_powerup()
44 if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster])) in dcscb_cpu_powerup()
47 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); in dcscb_cpu_powerup()
49 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); in dcscb_cpu_powerup()
53 static int dcscb_cluster_powerup(unsigned int cluster) in dcscb_cluster_powerup() argument
57 pr_debug("%s: cluster %u\n", __func__, cluster); in dcscb_cluster_powerup()
58 if (cluster >= 2) in dcscb_cluster_powerup()
61 /* remove cluster reset and add individual CPU's reset */ in dcscb_cluster_powerup()
[all …]
spc.c
58 /* SPC CPU/cluster reset statue */
79 /* TC2 static dual-cluster configuration */
105 * A15s cluster identifier
119 static inline bool cluster_is_a15(u32 cluster) in cluster_is_a15() argument
121 return cluster == info->a15_clusid; in cluster_is_a15()
154 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
158 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) in ve_spc_cpu_wakeup_irq() argument
162 if (cluster >= MAX_CLUSTERS) in ve_spc_cpu_wakeup_irq()
167 if (!cluster_is_a15(cluster)) in ve_spc_cpu_wakeup_irq()
183 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
[all …]
/kernel/linux/linux-5.10/Documentation/arm/
cluster-pm-race-avoidance.rst
2 Cluster-wide Power-up/power-down race avoidance algorithm
6 cluster setup and teardown operations and to manage hardware coherency
29 cluster-level operations are only performed when it is truly safe to do
34 are not immediately enabled when a cluster powers up. Since enabling or
38 power-down and power-up at the cluster level.
48 Each cluster and CPU is assigned a state, as follows:
67 The CPU or cluster is not coherent, and is either powered off or
71 The CPU or cluster has committed to moving to the UP state.
76 The CPU or cluster is active and coherent at the hardware
81 The CPU or cluster has committed to moving to the DOWN
[all …]
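The document excerpted above defines a small per-CPU / per-cluster state machine (roughly DOWN, COMING_UP, UP, GOING_DOWN) that the "first man" and "last man" consult before touching cluster-level resources. Purely as an illustration of that shape (these are not the kernel's actual macro names or values, which live in asm/mcpm.h), the core last-man check can be sketched like this:

    #include <stdio.h>
    #include <stdbool.h>

    enum cpu_state { STATE_DOWN, STATE_COMING_UP, STATE_UP, STATE_GOING_DOWN };

    #define NCPUS 4

    /* The "last man" may only tear the cluster down once every other CPU is
     * fully DOWN, i.e. not up and not committed to coming up. Illustrative only. */
    static bool cluster_may_power_down(const enum cpu_state cpu[], int self)
    {
        for (int i = 0; i < NCPUS; i++) {
            if (i == self)
                continue;
            if (cpu[i] != STATE_DOWN)
                return false;
        }
        return true;
    }

    int main(void)
    {
        enum cpu_state cpu[NCPUS] = { STATE_GOING_DOWN, STATE_DOWN, STATE_DOWN, STATE_DOWN };

        printf("cpu0 may tear down the cluster: %s\n",
               cluster_may_power_down(cpu, 0) ? "yes" : "no");
        return 0;
    }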
/kernel/linux/linux-5.10/fs/ocfs2/cluster/
nodemanager.c
20 * cluster active at a time. Changing this will require trickling
21 * cluster references throughout where nodes are looked up */
51 struct o2nm_cluster *cluster = o2nm_single_cluster; in o2nm_configured_node_map() local
53 BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); in o2nm_configured_node_map()
55 if (cluster == NULL) in o2nm_configured_node_map()
58 read_lock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
59 memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); in o2nm_configured_node_map()
60 read_unlock(&cluster->cl_nodes_lock); in o2nm_configured_node_map()
66 static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, in o2nm_node_ip_tree_lookup() argument
71 struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; in o2nm_node_ip_tree_lookup()
[all …]
/kernel/linux/linux-5.10/drivers/perf/
qcom_l2_pmu.c
121 * The cache is made up of one or more clusters, each cluster has its own PMU.
122 * Each cluster is associated with one or more CPUs.
143 /* The CPU that is used for collecting events on this cluster */
145 /* All the CPUs associated with this cluster */
243 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
254 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
262 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
268 * all CPUS, subunits and ID independent events in this cluster.
319 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
339 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
[all …]
/kernel/linux/linux-5.10/arch/arm/mach-exynos/
mcpm-exynos.c
59 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) in exynos_cpu_powerup() argument
61 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); in exynos_cpu_powerup()
64 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in exynos_cpu_powerup()
66 cluster >= EXYNOS5420_NR_CLUSTERS) in exynos_cpu_powerup()
73 * This assumes the cluster number of the big cores(Cortex A15) in exynos_cpu_powerup()
78 if (cluster && in exynos_cpu_powerup()
79 cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { in exynos_cpu_powerup()
94 pr_err("cpu %u cluster %u powerup failed\n", in exynos_cpu_powerup()
95 cpu, cluster); in exynos_cpu_powerup()
108 static int exynos_cluster_powerup(unsigned int cluster) in exynos_cluster_powerup() argument
[all …]
/kernel/linux/linux-5.10/arch/arm/include/asm/
mcpm.h
13 * Maximum number of possible clusters / CPUs per cluster.
39 * This is used to indicate where the given CPU from given cluster should
44 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
51 void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
55 * CPU/cluster power operations API for higher subsystems to use.
66 * mcpm_cpu_power_up - make given CPU in given cluster runable
68 * @cpu: CPU number within given cluster
69 * @cluster: cluster number for the CPU
71 * The identified CPU is brought out of reset. If the cluster was powered
73 * in the cluster run, and ensuring appropriate cluster setup.
[all …]
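The mcpm.h hits above summarize the two calls a platform needs to bring a secondary CPU online: point the entry vector at the kernel's secondary startup code, then power the CPU (and, if necessary, its cluster) up. Below is a hedged sketch of that sequence; the real in-tree user is arch/arm/common/mcpm_platsmp.c, and error handling plus the wakeup IPI are deliberately omitted here:

    /* Hedged sketch, not the kernel's own boot path. */
    #include <linux/sched.h>
    #include <asm/cputype.h>
    #include <asm/mcpm.h>
    #include <asm/smp_plat.h>

    static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        unsigned int mpidr = cpu_logical_map(cpu);
        unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        extern void secondary_startup(void);

        /* Tell MCPM where this CPU should jump once it leaves reset ... */
        mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);

        /* ... then bring it, and if needed its whole cluster, out of reset. */
        return mcpm_cpu_power_up(pcpu, pcluster);
    }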
/kernel/linux/linux-5.10/arch/arm/mach-hisi/
platmcpm.c
71 static bool hip04_cluster_is_down(unsigned int cluster) in hip04_cluster_is_down() argument
76 if (hip04_cpu_table[cluster][i]) in hip04_cluster_is_down()
81 static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) in hip04_set_snoop_filter() argument
89 data |= 1 << cluster; in hip04_set_snoop_filter()
91 data &= ~(1 << cluster); in hip04_set_snoop_filter()
100 unsigned int mpidr, cpu, cluster; in hip04_boot_secondary() local
106 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in hip04_boot_secondary()
110 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) in hip04_boot_secondary()
115 if (hip04_cpu_table[cluster][cpu]) in hip04_boot_secondary()
118 sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster); in hip04_boot_secondary()
[all …]
/kernel/linux/linux-5.10/Documentation/ABI/testing/
sysfs-ocfs2
14 covers how ocfs2 uses distributed locking between cluster
18 cluster nodes can interoperate if they have an identical
34 the available plugins to support ocfs2 cluster operation.
35 A cluster plugin is required to use ocfs2 in a cluster.
38 * 'o2cb' - The classic o2cb cluster stack that ocfs2 has
40 * 'user' - A plugin supporting userspace cluster software
54 cluster plugin is currently in use by the filesystem.
62 the cluster stack in use. The contents may change
63 when all filesystems are unmounted and the cluster stack
71 of current ocfs2 cluster stack. This value is set by
[all …]
/kernel/linux/linux-5.10/arch/mips/include/asm/
mips-cps.h
126 * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster
127 * @cluster: the ID of the cluster whose config we want
129 * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster.
133 static inline uint64_t mips_cps_cluster_config(unsigned int cluster) in mips_cps_cluster_config() argument
141 * within this cluster. in mips_cps_cluster_config()
143 WARN_ON(cluster != 0); in mips_cps_cluster_config()
151 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); in mips_cps_cluster_config()
160 * mips_cps_numcores - return the number of cores present in a cluster
161 * @cluster: the ID of the cluster whose core count we want
166 static inline unsigned int mips_cps_numcores(unsigned int cluster) in mips_cps_numcores() argument
[all …]
/kernel/linux/linux-5.10/fs/erofs/
erofs_fs.h
203 * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
214 * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
215 * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
219 * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
220 * bit 3-4 : (physical - logical) cluster bits of head 1:
222 * bit 5-7 : (physical - logical) cluster bits of head 2.
230 * Fixed-sized output compression ondisk Logical Extent cluster type:
231 * 0 - literal (uncompressed) cluster
232 * 1 - compressed cluster (for the head logical cluster)
233 * 2 - compressed cluster (for the other logical clusters)
[all …]
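The erofs_fs.h hits above describe a packed on-disk byte: bits 0-2 give the logical cluster size as a power-of-two offset from 4 KiB, while bits 3-4 and 5-7 give the extra physical-over-logical cluster bits for compression heads 1 and 2. A standalone decode of that layout, with an invented field name and example byte purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t cluster_cfg = 0x09;                            /* example on-disk byte */
        unsigned int lclusterbits = 12 + (cluster_cfg & 0x7);  /* bits 0-2 */
        unsigned int head1_delta  = (cluster_cfg >> 3) & 0x3;  /* bits 3-4 */
        unsigned int head2_delta  = (cluster_cfg >> 5) & 0x7;  /* bits 5-7 */

        printf("logical cluster size: %u bytes\n", 1u << lclusterbits);
        printf("head1 physical cluster: %u bytes\n", 1u << (lclusterbits + head1_delta));
        printf("head2 physical cluster: %u bytes\n", 1u << (lclusterbits + head2_delta));
        return 0;
    }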
/kernel/linux/linux-5.10/drivers/cpufreq/
tegra186-cpufreq.c
28 /* Denver cluster */
34 /* A57 cluster */
62 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_init() local
64 cluster->info; in tegra186_cpufreq_init()
76 policy->freq_table = cluster->table; in tegra186_cpufreq_init()
113 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_get() local
116 for (core = 0; core < ARRAY_SIZE(cluster->info->cpus); core++) { in tegra186_cpufreq_get()
117 if (cluster->info->cpus[core] != policy->cpu) in tegra186_cpufreq_get()
120 freq = (cluster->ref_clk_khz * ndiv) / cluster->div; in tegra186_cpufreq_get()
144 struct tegra186_cpufreq_cluster *cluster) in init_vhint_table() argument
[all …]
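The tegra186_cpufreq_get() hit at line 120 recovers a core's frequency from the NDIV value it reads back: freq_khz = ref_clk_khz * ndiv / div. A tiny worked example of that arithmetic with made-up clock and divider values (the real numbers come from the driver's per-cluster tables):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative numbers only. */
        unsigned int ref_clk_khz = 408000;   /* cluster reference clock */
        unsigned int div = 8;                /* post divider */
        unsigned int ndiv = 40;              /* feedback divider read from the core */
        unsigned int freq_khz = (ref_clk_khz * ndiv) / div;

        printf("%u kHz (%.2f GHz)\n", freq_khz, freq_khz / 1e6);
        return 0;
    }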
vexpress-spc-cpufreq.c
47 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) argument
48 #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) argument
74 static unsigned int find_cluster_maxfreq(int cluster) in find_cluster_maxfreq() argument
82 if (cluster == per_cpu(physical_cluster, j) && in find_cluster_maxfreq()
157 /* Recalc freq for old cluster when switching clusters */ in ve_spc_cpufreq_set_rate()
159 /* Switch cluster */ in ve_spc_cpufreq_set_rate()
164 /* Set freq of old cluster if there are cpus left on it */ in ve_spc_cpufreq_set_rate()
170 pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n", in ve_spc_cpufreq_set_rate()
285 u32 cluster = raw_cpu_to_cluster(cpu_dev->id); in _put_cluster_clk_and_freq_table() local
287 if (!freq_table[cluster]) in _put_cluster_clk_and_freq_table()
[all …]
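The two macros at lines 47-48 virtualize the A7 cluster's rates by halving them, presumably so both clusters' operating points can be presented on one common scale; ACTUAL_FREQ doubles the value back when the clock is actually programmed. A standalone round-trip of the same arithmetic (the A7_CLUSTER value and extra parentheses are assumptions for this demo):

    #include <stdio.h>

    #define A7_CLUSTER 1   /* assumed ID for the demo; the driver defines its own */

    #define ACTUAL_FREQ(cluster, freq)  ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
    #define VIRT_FREQ(cluster, freq)    ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

    int main(void)
    {
        unsigned int hw_khz = 800000;                          /* what the A7 clock runs at */
        unsigned int virt = VIRT_FREQ(A7_CLUSTER, hw_khz);     /* 400000: what cpufreq sees */
        unsigned int back = ACTUAL_FREQ(A7_CLUSTER, virt);     /* 800000 again */

        printf("hw %u kHz -> virtual %u kHz -> hw %u kHz\n", hw_khz, virt, back);
        return 0;
    }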
/kernel/linux/linux-5.10/drivers/clk/mvebu/
ap-cpu-clk.c
128 * struct ap806_clk: CPU cluster clock controller instance
129 * @cluster: Cluster clock controller index
130 * @clk_name: Cluster clock controller name
131 * @dev : Cluster clock device
132 * @hw: HW specific structure of Cluster clock controller
136 unsigned int cluster; member
152 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_recalc_rate()
168 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
170 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
172 (clk->cluster * clk->pll_regs->cluster_offset); in ap_cpu_clk_set_rate()
[all …]
/kernel/linux/linux-5.10/drivers/remoteproc/
ti_k3_r5_remoteproc.c
71 * struct k3_r5_cluster - K3 R5F Cluster structure
73 * @mode: Mode to configure the Cluster - Split or LockStep
74 * @cores: list of R5 cores within the cluster
119 * @cluster: cached pointer to parent cluster structure
129 struct k3_r5_cluster *cluster; member
249 static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) in k3_r5_lockstep_reset() argument
255 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
266 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
279 list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
284 core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_reset()
[all …]
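k3_r5_lockstep_reset() above walks every core in the cluster and, if one step fails, uses list_for_each_entry_continue_reverse() to undo only the cores it already touched. The standalone sketch below shows the same apply-then-unwind shape with an array instead of a kernel list; all names and the forced failure are illustrative:

    #include <stdio.h>

    #define NCORES 2

    static int core_hold_reset(int core)
    {
        /* Pretend the second core fails, to exercise the unwind path. */
        printf("hold core %d in reset\n", core);
        return core == 1 ? -1 : 0;
    }

    static void core_release_reset(int core)
    {
        printf("release core %d from reset\n", core);
    }

    /* Put the whole cluster in reset, or leave it untouched on failure. */
    static int cluster_hold_reset(void)
    {
        int i, ret;

        for (i = 0; i < NCORES; i++) {
            ret = core_hold_reset(i);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        /* Undo, in reverse order, only the cores we already changed. */
        while (--i >= 0)
            core_release_reset(i);
        return ret;
    }

    int main(void)
    {
        return cluster_hold_reset() ? 1 : 0;
    }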
/kernel/linux/linux-5.10/arch/alpha/mm/
numa.c
33 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
38 struct memclust_struct * cluster; in show_mem_layout() local
47 for_each_mem_cluster(memdesc, cluster, i) { in show_mem_layout()
49 i, cluster->usage, cluster->start_pfn, in show_mem_layout()
50 cluster->start_pfn + cluster->numpages); in show_mem_layout()
58 struct memclust_struct * cluster; in setup_memory_node() local
78 for_each_mem_cluster(memdesc, cluster, i) { in setup_memory_node()
82 if (cluster->usage & 3) in setup_memory_node()
85 start = cluster->start_pfn; in setup_memory_node()
86 end = start + cluster->numpages; in setup_memory_node()
[all …]
/kernel/linux/linux-5.10/drivers/cpuidle/
cpuidle-big_little.c
32 * cluster state since, when all CPUs in a cluster hit it, the cluster
35 * There is no notion of cluster states in the menu governor, so CPUs
36 * have to define CPU states where possibly the cluster will be shutdown
38 * at random times; however the cluster state provides target_residency
39 * values as if all CPUs in a cluster enter the state at once; this is
46 * current cluster operating point. It is the time it takes to get the CPU
47 * up and running when the CPU is powered up on cluster wake-up from shutdown.
51 * target_residency: it is the minimum amount of time the cluster has
52 * to be down to break even in terms of power consumption. cluster
69 .desc = "ARM little-cluster power down",
[all …]
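The comments quoted above define the two knobs a cluster state exposes to the governor: exit latency (worst-case CPU-plus-cluster wake-up cost) and target_residency (the minimum down-time to break even on power). A hedged sketch of what such a state-table entry looks like; the numbers and identifiers prefixed with example_ are made up, and the real tables live in cpuidle-big_little.c itself:

    #include <linux/cpuidle.h>
    #include <asm/cpuidle.h>

    static int example_enter_powerdown(struct cpuidle_device *dev,
                                       struct cpuidle_driver *drv, int idx)
    {
        /* A real driver would call into the MCPM/suspend path here. */
        return idx;
    }

    static struct cpuidle_driver example_little_idle_driver = {
        .name = "example_little_idle",
        .states[0] = ARM_CPUIDLE_WFI_STATE,        /* plain WFI, cluster stays up */
        .states[1] = {
            .enter            = example_enter_powerdown,
            .exit_latency     = 700,   /* us: CPU + cluster wake-up, illustrative */
            .target_residency = 2500,  /* us: break-even residency, illustrative */
            .flags            = CPUIDLE_FLAG_TIMER_STOP,
            .name             = "C1",
            .desc             = "example little-cluster power down",
        },
        .state_count = 2,
    };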
/kernel/linux/linux-5.10/fs/fat/
cache.c
7 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
21 int fcluster; /* cluster number in the file. */
22 int dcluster; /* cluster number on disk. */
123 /* Find the same part as "new" in cluster-chain. */ in fat_cache_merge()
225 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) in fat_get_cluster() argument
240 "%s: invalid start cluster (i_pos %lld, start %08x)", in fat_get_cluster()
244 if (cluster == 0) in fat_get_cluster()
247 if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) { in fat_get_cluster()
256 while (*fclus < cluster) { in fat_get_cluster()
257 /* prevent the infinite loop of cluster chain */ in fat_get_cluster()
[all …]
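fat_get_cluster() above maps a cluster index within the file (*fclus) to that cluster's position on disk (*dclus) by following the FAT chain, with a guard against corrupted, looping chains. A standalone miniature of the same walk over an in-memory FAT array (all names are invented and the end-of-chain marker is simplified):

    #include <stdio.h>

    #define FAT_EOF (-1)   /* simplified end-of-chain marker */

    /* Toy FAT: entry i holds the next cluster in the chain, or FAT_EOF. */
    static int fat[] = { FAT_EOF, FAT_EOF, 5, FAT_EOF, FAT_EOF, 9, FAT_EOF, FAT_EOF, FAT_EOF, 3 };
    #define MAX_CLUSTERS (int)(sizeof(fat) / sizeof(fat[0]))

    /* Return the on-disk cluster holding file-cluster 'fclus', or -1 on error. */
    static int get_cluster(int start, int fclus)
    {
        int dclus = start, i;

        for (i = 0; i < fclus; i++) {
            if (dclus == FAT_EOF)
                return -1;          /* file is shorter than requested */
            if (i > MAX_CLUSTERS)
                return -1;          /* guard against a looping chain */
            dclus = fat[dclus];
        }
        return dclus;
    }

    int main(void)
    {
        /* File starts at cluster 2; its chain is 2 -> 5 -> 9 -> 3. */
        for (int fclus = 0; fclus < 4; fclus++)
            printf("file cluster %d lives in disk cluster %d\n",
                   fclus, get_cluster(2, fclus));
        return 0;
    }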
