
Searched for +full:cluster +full:- +full:index (results 1 – 25 of 431), sorted by relevance


/kernel/linux/linux-5.10/kernel/sched/
core_ctl.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
60 struct cluster_data *cluster; member
69 #define for_each_cluster(cluster, idx) \ argument
70 for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\
81 static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
91 return -EINVAL; in store_min_cpus()
93 state->min_cpus = min(val, state->max_cpus); in store_min_cpus()
101 return sysfs_emit(buf, "%u\n", state->min_cpus); in show_min_cpus()
110 return -EINVAL; in store_max_cpus()
[all …]
walt.c
1 // SPDX-License-Identifier: GPL-2.0
90 raw_spin_lock(&cpu_rq(cpu)->lock); in acquire_rq_locks_irqsave()
92 raw_spin_lock_nested(&cpu_rq(cpu)->lock, level); in acquire_rq_locks_irqsave()
103 raw_spin_unlock(&cpu_rq(cpu)->lock); in release_rq_locks_irqrestore()
121 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
151 * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
174 unsigned int max_capacity = 1024; /* max(rq->capacity) */
175 unsigned int min_capacity = 1024; /* min(rq->capacity) */
176 unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
178 min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
[all …]
/kernel/linux/linux-6.6/kernel/sched/
core_ctl.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
60 struct cluster_data *cluster; member
69 #define for_each_cluster(cluster, idx) \ argument
70 for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\
81 static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
91 return -EINVAL; in store_min_cpus()
93 state->min_cpus = min(val, state->max_cpus); in store_min_cpus()
101 return sysfs_emit(buf, "%u\n", state->min_cpus); in show_min_cpus()
110 return -EINVAL; in store_max_cpus()
[all …]
walt.c
1 // SPDX-License-Identifier: GPL-2.0
91 raw_spin_lock(&cpu_rq(cpu)->__lock); in acquire_rq_locks_irqsave()
93 raw_spin_lock_nested(&cpu_rq(cpu)->__lock, level); in acquire_rq_locks_irqsave()
104 raw_spin_unlock(&cpu_rq(cpu)->__lock); in release_rq_locks_irqrestore()
122 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
152 * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
175 unsigned int max_capacity = 1024; /* max(rq->capacity) */
176 unsigned int min_capacity = 1024; /* min(rq->capacity) */
177 unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
179 min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
[all …]
/kernel/linux/linux-5.10/drivers/cpuidle/
cpuidle-big_little.c
1 // SPDX-License-Identifier: GPL-2.0-only
31 * index 1 states have to define exit_latency and target_residency for
32 * cluster state since, when all CPUs in a cluster hit it, the cluster
35 * There is no notion of cluster states in the menu governor, so CPUs
36 * have to define CPU states where possibly the cluster will be shutdown
38 * at random times; however the cluster state provides target_residency
39 * values as if all CPUs in a cluster enter the state at once; this is
41 * or in the MCPM back-ends.
46 * current cluster operating point. It is the time it takes to get the CPU
47 * up and running when the CPU is powered up on cluster wake-up from shutdown.
[all …]
cpuidle-tegra.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2010-2013, NVIDIA Corporation.
15 #define pr_fmt(fmt) "tegra-cpuidle: " fmt
53 return firmware_ops->prepare_idle && firmware_ops->do_idle; in tegra_cpuidle_using_firmware()
73 while (retries--) { in tegra_cpuidle_wait_for_secondary_cpus_parking()
79 * shutdown in order to power-off CPU's cluster safely. in tegra_cpuidle_wait_for_secondary_cpus_parking()
81 * it takes about 40-150us in average and over 1000us in in tegra_cpuidle_wait_for_secondary_cpus_parking()
90 } while (timeout_us--); in tegra_cpuidle_wait_for_secondary_cpus_parking()
99 return -ETIMEDOUT; in tegra_cpuidle_wait_for_secondary_cpus_parking()
139 if (err && err != -ENOSYS) in tegra_cpuidle_c7_enter()
[all …]
/kernel/linux/linux-6.6/drivers/cpuidle/
cpuidle-big_little.c
1 // SPDX-License-Identifier: GPL-2.0-only
31 * index 1 states have to define exit_latency and target_residency for
32 * cluster state since, when all CPUs in a cluster hit it, the cluster
35 * There is no notion of cluster states in the menu governor, so CPUs
36 * have to define CPU states where possibly the cluster will be shutdown
38 * at random times; however the cluster state provides target_residency
39 * values as if all CPUs in a cluster enter the state at once; this is
41 * or in the MCPM back-ends.
46 * current cluster operating point. It is the time it takes to get the CPU
47 * up and running when the CPU is powered up on cluster wake-up from shutdown.
[all …]
cpuidle-tegra.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2010-2013, NVIDIA Corporation.
15 #define pr_fmt(fmt) "tegra-cpuidle: " fmt
68 while (retries--) { in tegra_cpuidle_wait_for_secondary_cpus_parking()
74 * shutdown in order to power-off CPU's cluster safely. in tegra_cpuidle_wait_for_secondary_cpus_parking()
76 * it takes about 40-150us in average and over 1000us in in tegra_cpuidle_wait_for_secondary_cpus_parking()
85 } while (timeout_us--); in tegra_cpuidle_wait_for_secondary_cpus_parking()
94 return -ETIMEDOUT; in tegra_cpuidle_wait_for_secondary_cpus_parking()
134 if (err && err != -ENOSYS) in tegra_cpuidle_c7_enter()
146 * pending SGI state across CPU cluster PM. Abort and retry in tegra_cpuidle_coupled_barrier()
[all …]
/kernel/linux/linux-5.10/drivers/cpufreq/
tegra186-cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0-only
7 #include <linux/dma-mapping.h>
13 #include <soc/tegra/bpmp-abi.h>
26 #define NO_CPU -1
28 /* Denver cluster */
34 /* A57 cluster */
61 for (i = 0; i < data->num_clusters; i++) { in tegra186_cpufreq_init()
62 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; in tegra186_cpufreq_init() local
64 cluster->info; in tegra186_cpufreq_init()
67 for (core = 0; core < ARRAY_SIZE(info->cpus); core++) { in tegra186_cpufreq_init()
[all …]
tegra194-cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/dma-mapping.h>
19 #include <soc/tegra/bpmp-abi.h>
31 enum cluster { enum
59 static void get_cpu_cluster(void *cluster) in get_cpu_cluster() argument
63 *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1); in get_cpu_cluster()
67 * Read per-core Read-only system register NVFREQ_FEEDBACK_EL1.
87 return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv); in map_ndiv_to_freq()
104 * freq of cluster. Assuming max cluster clock ~2000MHz, in tegra_read_counters()
110 c = &read_counters_work->c; in tegra_read_counters()
[all …]
vexpress-spc-cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2013 - 2019 ARM Ltd.
47 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) argument
48 #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) argument
74 static unsigned int find_cluster_maxfreq(int cluster) in find_cluster_maxfreq() argument
82 if (cluster == per_cpu(physical_cluster, j) && in find_cluster_maxfreq()
141 ret = -EIO; in ve_spc_cpufreq_set_rate()
157 /* Recalc freq for old cluster when switching clusters */ in ve_spc_cpufreq_set_rate()
159 /* Switch cluster */ in ve_spc_cpufreq_set_rate()
164 /* Set freq of old cluster if there are cpus left on it */ in ve_spc_cpufreq_set_rate()
[all …]
/kernel/linux/linux-6.6/drivers/cpufreq/
tegra186-cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0-only
7 #include <linux/dma-mapping.h>
13 #include <soc/tegra/bpmp-abi.h>
28 /* CPU0 - A57 Cluster */
33 /* CPU1 - Denver Cluster */
38 /* CPU2 - Denver Cluster */
43 /* CPU3 - A57 Cluster */
48 /* CPU4 - A57 Cluster */
53 /* CPU5 - A57 Cluster */
75 unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; in tegra186_cpufreq_init() local
[all …]
vexpress-spc-cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2013 - 2019 ARM Ltd.
45 #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) argument
46 #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq) argument
71 static unsigned int find_cluster_maxfreq(int cluster) in find_cluster_maxfreq() argument
79 if (cluster == per_cpu(physical_cluster, j) && in find_cluster_maxfreq()
138 ret = -EIO; in ve_spc_cpufreq_set_rate()
154 /* Recalc freq for old cluster when switching clusters */ in ve_spc_cpufreq_set_rate()
156 /* Switch cluster */ in ve_spc_cpufreq_set_rate()
161 /* Set freq of old cluster if there are cpus left on it */ in ve_spc_cpufreq_set_rate()
[all …]
/kernel/linux/linux-6.6/Documentation/devicetree/bindings/watchdog/
samsung-wdt.yaml
1 # SPDX-License-Identifier: GPL-2.0
3 ---
4 $id: http://devicetree.org/schemas/watchdog/samsung-wdt.yaml#
5 $schema: http://devicetree.org/meta-schemas/core.yaml#
10 - Krzysztof Kozlowski <krzk@kernel.org>
20 - samsung,s3c2410-wdt # for S3C2410
21 - samsung,s3c6410-wdt # for S3C6410, S5PV210 and Exynos4
22 - samsung,exynos5250-wdt # for Exynos5250
23 - samsung,exynos5420-wdt # for Exynos5420
24 - samsung,exynos7-wdt # for Exynos7
[all …]
/kernel/linux/linux-5.10/include/linux/
swap.h
1 /* SPDX-License-Identifier: GPL-2.0 */
14 #include <linux/page-flags.h>
27 #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
28 #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
37 return current->flags & PF_KSWAPD; in current_is_kswapd()
45 * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
95 ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
96 SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
101 * swap area format, the second part of the union adds - in the
102 * old reserved area - some extra information. Note that the first
[all …]
/kernel/linux/linux-6.6/include/linux/
swap.h
1 /* SPDX-License-Identifier: GPL-2.0 */
15 #include <linux/page-flags.h>
29 #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
30 #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
39 return current->flags & PF_KSWAPD; in current_is_kswapd()
47 * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
118 ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
119 SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
125 * swap area format, the second part of the union adds - in the
126 * old reserved area - some extra information. Note that the first
[all …]
/kernel/linux/linux-6.6/fs/btrfs/
free-space-cache.c
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
18 #include "free-space-cache.h"
20 #include "disk-io.h"
23 #include "space-info.h"
24 #include "delalloc-space.h"
25 #include "block-group.h"
28 #include "inode-item.h"
30 #include "file-item.h"
65 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/
a6xx_gpu_state.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
91 list_add_tail(&obj->node, &a6xx_state->objs); in state_kcalloc()
92 return &obj->data; in state_kcalloc()
106 * Allocate 1MB for the crashdumper scratch region - 8k for the script and
110 #define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
115 dumper->ptr = msm_gem_kernel_new_locked(gpu->dev, in a6xx_crashdumper_init()
116 SZ_1M, MSM_BO_UNCACHED, gpu->aspace, in a6xx_crashdumper_init()
117 &dumper->bo, &dumper->iova); in a6xx_crashdumper_init()
119 if (!IS_ERR(dumper->ptr)) in a6xx_crashdumper_init()
[all …]
/kernel/linux/linux-6.6/Documentation/devicetree/bindings/cpufreq/
apple,cluster-cpufreq.yaml
1 # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
3 ---
4 $id: http://devicetree.org/schemas/cpufreq/apple,cluster-cpufreq.yaml#
5 $schema: http://devicetree.org/meta-schemas/core.yaml#
7 title: Apple SoC cluster cpufreq device
10 - Hector Martin <marcan@marcan.st>
13 Apple SoCs (e.g. M1) have a per-cpu-cluster DVFS controller that is part of
14 the cluster management register block. This binding uses the standard
15 operating-points-v2 table to define the CPU performance states, with the
16 opp-level property specifying the hardware p-state index for that level.
[all …]
/kernel/linux/linux-5.10/fs/btrfs/
free-space-cache.c
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
15 #include "free-space-cache.h"
17 #include "disk-io.h"
19 #include "inode-map.h"
21 #include "space-info.h"
22 #include "delalloc-space.h"
23 #include "block-group.h"
51 struct btrfs_fs_info *fs_info = root->fs_info; in __lookup_free_space_inode()
70 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
[all …]
/kernel/linux/linux-6.6/Documentation/admin-guide/perf/
hisi-pmu.rst
10 The HiSilicon SoC encapsulates multiple CPU and IO dies. Each CPU cluster
12 called Super CPU cluster (SCCL) and is made up of 6 CCLs. Each SCCL has
13 two HHAs (0 - 1) and four DDRCs (0 - 3), respectively.
16 -------------------------------
28 name will appear in event listing as hisi_sccl<sccl-id>_module<index-id>.
29 where "sccl-id" is the identifier of the SCCL and "index-id" is the index of
32 e.g. hisi_sccl3_l3c0/rd_hit_cpipe is READ_HIT_CPIPE event of L3C index #0 in
35 e.g. hisi_sccl1_hha0/rx_operations is RX_OPERATIONS event of HHA index #0 in
45 ------------------------------------------
47 ------------------------------------------
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/
a6xx_gpu_state.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
99 list_add_tail(&obj->node, &a6xx_state->objs); in state_kcalloc()
100 return &obj->data; in state_kcalloc()
114 * Allocate 1MB for the crashdumper scratch region - 8k for the script and
118 #define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
123 dumper->ptr = msm_gem_kernel_new(gpu->dev, in a6xx_crashdumper_init()
124 SZ_1M, MSM_BO_WC, gpu->aspace, in a6xx_crashdumper_init()
125 &dumper->bo, &dumper->iova); in a6xx_crashdumper_init()
127 if (!IS_ERR(dumper->ptr)) in a6xx_crashdumper_init()
[all …]
/kernel/linux/linux-6.6/arch/arm/common/
mcpm_head.S
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
6 * Copyright: (C) 2012-2013 Linaro Limited
8 * Refer to Documentation/arch/arm/cluster-pm-race-avoidance.rst
18 .arch armv7-a
28 1903: .asciz " cluster"
56 ubfx r10, r0, #8, #8 @ r10 = cluster
58 mla r4, r3, r10, r9 @ r4 = canonical CPU index
88 mla r8, r0, r10, r8 @ r8 = sync cluster base
96 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
[all …]
/kernel/linux/linux-5.10/arch/arm/common/
mcpm_head.S
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
6 * Copyright: (C) 2012-2013 Linaro Limited
8 * Refer to Documentation/arm/cluster-pm-race-avoidance.rst
26 1903: .asciz " cluster"
54 ubfx r10, r0, #8, #8 @ r10 = cluster
56 mla r4, r3, r10, r9 @ r4 = canonical CPU index
86 mla r8, r0, r10, r8 @ r8 = sync cluster base
94 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
98 mla r11, r0, r10, r11 @ r11 = cluster first man lock
[all …]
/kernel/linux/linux-6.6/fs/f2fs/
compress.c
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/backing-dev.h>
31 if (likely(size <= sbi->page_array_slab_size)) in page_array_alloc()
32 return f2fs_kmem_cache_alloc(sbi->page_array_slab, in page_array_alloc()
45 if (likely(size <= sbi->page_array_slab_size)) in page_array_free()
46 kmem_cache_free(sbi->page_array_slab, pages); in page_array_free()
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index) in offset_in_cluster() argument
63 return index & (cc->cluster_size - 1); in offset_in_cluster()
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index) in cluster_idx() argument
68 return index >> cc->log_cluster_size; in cluster_idx()
[all …]
