/kernel/linux/linux-5.10/sound/soc/intel/skylake/ |
D | skl-sst-dsp.c |
    39  skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING; in skl_dsp_init_core_state()
    40  skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1; in skl_dsp_init_core_state()
    42  for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) { in skl_dsp_init_core_state()
    43  skl->cores.state[i] = SKL_DSP_RESET; in skl_dsp_init_core_state()
    44  skl->cores.usage_count[i] = 0; in skl_dsp_init_core_state()
    55  core_mask = SKL_DSP_CORES_MASK(skl->cores.count); in skl_dsp_get_enabled_cores()
    341 if (core_id >= skl->cores.count) { in skl_dsp_get_core()
    346 skl->cores.usage_count[core_id]++; in skl_dsp_get_core()
    348 if (skl->cores.state[core_id] == SKL_DSP_RESET) { in skl_dsp_get_core()
    358 core_id, skl->cores.state[core_id], in skl_dsp_get_core()
    [all …]
|
D | skl-messages.c |
    258 struct skl_dsp_cores *cores; in skl_init_dsp() local
    287 cores = &skl->cores; in skl_init_dsp()
    288 cores->count = ops->num_cores; in skl_init_dsp()
    290 cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL); in skl_init_dsp()
    291 if (!cores->state) { in skl_init_dsp()
    296 cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count), in skl_init_dsp()
    298 if (!cores->usage_count) { in skl_init_dsp()
    308 kfree(cores->state); in skl_init_dsp()
    325 kfree(skl->cores.state); in skl_free_dsp()
    326 kfree(skl->cores.usage_count); in skl_free_dsp()
|
D | bxt-sst.c |
    271 if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING) in bxt_d0i3_target_state()
    326 skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3; in bxt_set_dsp_D0i3()
    358 if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3) in bxt_set_dsp_D0i0()
    381 skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING; in bxt_set_dsp_D0i0()
    410 skl->cores.state[core_id] = SKL_DSP_RUNNING; in bxt_set_dsp_D0()
    472 skl->cores.state[core_id] = SKL_DSP_RUNNING; in bxt_set_dsp_D0()
    518 skl->cores.state[core_id] = SKL_DSP_RESET; in bxt_set_dsp_D3()
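
Taken together, the skl-messages.c and skl-sst-dsp.c hits show the driver's per-core bookkeeping: state[] and usage_count[] arrays are kcalloc'd from cores.count, and skl_dsp_get_core() bumps the reference count and powers a core up out of SKL_DSP_RESET. A minimal sketch of that pattern, with simplified types and a hypothetical power_up_core() helper standing in for the driver's internals::

    #include <linux/errno.h>
    #include <linux/slab.h>

    /*
     * Sketch only: simplified stand-ins for the skl_dsp_cores bookkeeping
     * seen above.  power_up_core() is a hypothetical helper, not the
     * driver's API.
     */
    enum core_state { CORE_RESET, CORE_RUNNING };

    struct dsp_cores {
        unsigned int count;
        enum core_state *state;
        int *usage_count;
    };

    static int power_up_core(unsigned int core_id)
    {
        return 0;       /* stub: the real driver programs the DSP here */
    }

    static int dsp_cores_alloc(struct dsp_cores *cores, unsigned int num)
    {
        cores->count = num;

        cores->state = kcalloc(num, sizeof(*cores->state), GFP_KERNEL);
        if (!cores->state)
            return -ENOMEM;

        cores->usage_count = kcalloc(num, sizeof(*cores->usage_count),
                                     GFP_KERNEL);
        if (!cores->usage_count) {
            kfree(cores->state);
            return -ENOMEM;
        }

        return 0;       /* kcalloc() zeroes both arrays: all cores RESET */
    }

    static int dsp_get_core(struct dsp_cores *cores, unsigned int core_id)
    {
        int ret = 0;

        if (core_id >= cores->count)
            return -EINVAL;

        cores->usage_count[core_id]++;
        if (cores->state[core_id] == CORE_RESET) {
            ret = power_up_core(core_id);   /* first user powers it up */
            if (!ret)
                cores->state[core_id] = CORE_RUNNING;
        }

        return ret;
    }
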
|
/kernel/linux/linux-5.10/Documentation/admin-guide/ |
D | lockup-watchdogs.rst |
    67 By default, the watchdog runs on all online cores. However, on a
    69 on the housekeeping cores, not the cores specified in the "nohz_full"
    71 the "nohz_full" cores, we would have to run timer ticks to activate
    73 from protecting the user code on those cores from the kernel.
    74 Of course, disabling it by default on the nohz_full cores means that
    75 when those cores do enter the kernel, by default we will not be
    77 to continue to run on the housekeeping (non-tickless) cores means
    78 that we will continue to detect lockups properly on those cores.
    80 In either case, the set of cores excluded from running the watchdog
    82 nohz_full cores, this may be useful for debugging a case where the
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/dispnv50/ |
D | core.c |
    44 } cores[] = { in nv50_core_new() local
    64 cid = nvif_mclass(&disp->disp->object, cores); in nv50_core_new()
    70 return cores[cid].new(drm, cores[cid].oclass, pcore); in nv50_core_new()
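
The nv50_core_new() hits show a local table of display classes and constructors handed to nvif_mclass(), which returns the index of the first class the display object supports; that entry's constructor is then called. Below is a generic sketch of the same probe-table idea; the class IDs, constructor names and is_supported() predicate are invented placeholders, not nouveau/nvif API::

    #include <linux/errno.h>
    #include <linux/types.h>

    /*
     * Generic sketch of the class-probe table pattern: everything here is
     * a placeholder, not the nouveau implementation.
     */
    enum { CLASS_GEN2 = 2, CLASS_GEN1 = 1 };

    struct core_ctor {
        int oclass;                           /* hardware class id */
        int (*new)(int oclass, void **pcore); /* constructor for it */
    };

    static int new_gen2(int oclass, void **pcore) { return 0; } /* stubs */
    static int new_gen1(int oclass, void **pcore) { return 0; }

    static bool is_supported(int oclass)
    {
        return oclass == CLASS_GEN1;  /* stub for the device class query */
    }

    static const struct core_ctor cores[] = {
        { CLASS_GEN2, new_gen2 },     /* newest class listed first */
        { CLASS_GEN1, new_gen1 },
        {}
    };

    static int core_new(void **pcore)
    {
        int cid;

        /* The first (newest) entry the hardware supports wins. */
        for (cid = 0; cores[cid].new; cid++)
            if (is_supported(cores[cid].oclass))
                return cores[cid].new(cores[cid].oclass, pcore);

        return -ENODEV;
    }
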
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/timer/ |
D | snps,arc-timer.txt |
    4  - Two identical copies TIMER0 and TIMER1 exist in ARC cores and historically
    5  TIMER0 used as clockevent provider (true for all ARC cores)
    12 (16 for ARCHS cores, 3 for ARC700 cores)
|
/kernel/linux/linux-5.10/drivers/remoteproc/ |
D | ti_k3_r5_remoteproc.c |
    79  struct list_head cores; member
    255 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
    266 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
    279 list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
    284 core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_reset()
    286 list_for_each_entry_from_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
    300 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
    312 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
    324 list_for_each_entry_continue(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
    328 core = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_release()
    [all …]
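
The k3_r5 hits show the lockstep sequencing pattern: cores on cluster->cores are halted (or released) in list order, and a failure part-way through is unwound in the opposite direction with list_for_each_entry_continue_reverse(). A minimal sketch of that unwind idiom, using simplified stand-in types and hypothetical halt/run callbacks::

    #include <linux/list.h>

    /*
     * Minimal sketch of the halt-then-unwind idiom; the struct fields and
     * halt()/run() callbacks are simplified stand-ins, not the driver's
     * types.
     */
    struct r5_core {
        struct list_head elem;
        int (*halt)(struct r5_core *core);
        int (*run)(struct r5_core *core);
    };

    struct r5_cluster {
        struct list_head cores;
    };

    static int cluster_lockstep_reset(struct r5_cluster *cluster)
    {
        struct r5_core *core;
        int ret;

        /* Halt every core in list order ... */
        list_for_each_entry(core, &cluster->cores, elem) {
            ret = core->halt(core);
            if (ret)
                goto unroll;
        }
        return 0;

    unroll:
        /* ... and restart the ones already halted, newest first. */
        list_for_each_entry_continue_reverse(core, &cluster->cores, elem)
            core->run(core);
        return ret;
    }
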
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/media/xilinx/ |
D | video.txt |
    1  DT bindings for Xilinx video IP cores
    4  Xilinx video IP cores process video streams by acting as video sinks and/or
    10 cores are represented as defined in ../video-interfaces.txt.
    18 The following properties are common to all Xilinx video IP cores.
    21 AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream
|
D | xlnx,video.txt |
    8  video IP cores. Each video IP core is represented as documented in video.txt
    11 mappings between DMAs and the video IP cores.
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/bus/ |
D | brcm,bus-axi.txt |
    9  The cores on the AXI bus are automatically detected by bcma with the
    12 BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide
    17 The top-level axi bus may contain children representing attached cores
    19 detected (e.g. IRQ numbers). Also some of the cores may be responsible
|
/kernel/linux/linux-5.10/arch/x86/mm/ |
D | amdtopology.c |
    63  unsigned int bits, cores, apicid_base; in amd_numa_init() local
    165 cores = 1 << bits; in amd_numa_init()
    179 for (j = apicid_base; j < cores + apicid_base; j++) in amd_numa_init()
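
The amd_numa_init() hits compute the per-node core count as 1 << bits from the APIC ID shift and then walk the node's APIC ID range. A small worked example of that arithmetic follows; the function and its values are illustrative only, while set_apicid_to_node() is the arch/x86 helper used for this kind of mapping::

    #include <asm/numa.h>

    /*
     * Worked example: with an APIC ID shift of 'bits' per node, each node
     * owns 1 << bits contiguous APIC IDs starting at its base.
     */
    static void map_node_apicids(int nodeid, unsigned int bits,
                                 unsigned int apicid_base)
    {
        unsigned int cores = 1 << bits; /* e.g. bits = 2 -> 4 cores/node */
        unsigned int j;

        for (j = apicid_base; j < apicid_base + cores; j++)
            set_apicid_to_node(j, nodeid);
    }
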
|
/kernel/linux/linux-5.10/drivers/gpu/drm/v3d/ |
D | v3d_irq.c |
    216 for (core = 0; core < v3d->cores; core++) in v3d_irq_init()
    261 for (core = 0; core < v3d->cores; core++) { in v3d_irq_enable()
    276 for (core = 0; core < v3d->cores; core++) in v3d_irq_disable()
    281 for (core = 0; core < v3d->cores; core++) in v3d_irq_disable()
|
D | v3d_debugfs.c |
    102 for (core = 0; core < v3d->cores; core++) { in v3d_v3d_debugfs_regs()
    132 u32 ident0, ident1, ident2, ident3, cores; in v3d_v3d_debugfs_ident() local
    143 cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); in v3d_v3d_debugfs_ident()
    162 for (core = 0; core < cores; core++) { in v3d_v3d_debugfs_ident()
|
/kernel/linux/linux-5.10/drivers/bcma/ |
D | main.c |
    91  list_for_each_entry(core, &bus->cores, list) { in bcma_find_core_unit()
    271 INIT_LIST_HEAD(&bus->cores); in bcma_init_bus()
    295 list_for_each_entry(core, &bus->cores, list) { in bcma_register_devices()
    363 list_for_each_entry_safe(core, tmp, &bus->cores, list) { in bcma_unregister_cores()
    373 list_for_each_entry_safe(core, tmp, &bus->cores, list) { in bcma_unregister_cores()
    409 list_for_each_entry(core, &bus->cores, list) { in bcma_bus_register()
    534 list_for_each_entry(core, &bus->cores, list) { in bcma_bus_suspend()
    555 list_for_each_entry(core, &bus->cores, list) { in bcma_bus_resume()
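
bcma keeps every detected core on bus->cores and walks that list for lookup, registration, suspend and resume. A hedged sketch of a bcma_find_core_unit()-style lookup follows, using simplified stand-in fields rather than the real struct bcma_device layout::

    #include <linux/list.h>
    #include <linux/types.h>

    /*
     * Sketch of the list lookup; the struct fields are simplified
     * stand-ins, not the real bcma structures.
     */
    struct bcma_core_sketch {
        struct list_head list;
        u16 id;           /* core ID */
        u8 unit;          /* instance number among cores with that ID */
    };

    struct bcma_bus_sketch {
        struct list_head cores;
    };

    /* Return the 'unit'-th core with the given ID, or NULL if absent. */
    static struct bcma_core_sketch *
    find_core_unit(struct bcma_bus_sketch *bus, u16 coreid, u8 unit)
    {
        struct bcma_core_sketch *core;

        list_for_each_entry(core, &bus->cores, list) {
            if (core->id == coreid && core->unit == unit)
                return core;
        }
        return NULL;
    }
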
|
/kernel/linux/linux-5.10/Documentation/ABI/testing/ |
D | sysfs-bus-bcma |
    14 There are a few types of BCMA cores, they can be identified by
    22 BCMA cores of the same type can still slightly differ depending
|
/kernel/linux/linux-5.10/drivers/soc/tegra/ |
D | Kconfig |
    85  the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
    86  cores in a switched configuration. It features a GPU of the Maxwell
    88  and providing 256 CUDA cores. It supports hardware-accelerated en-
    105 combination of Denver and Cortex-A57 CPU cores and a GPU based on
|
/kernel/linux/linux-5.10/Documentation/admin-guide/device-mapper/ |
D | unstriped.rst |
    85  Intel NVMe drives contain two cores on the physical device.
    88  in a 256k stripe across the two cores::
    100 are striped across the two cores. When we unstripe this hardware RAID 0
    113 unstriped ontop of Intel NVMe device that has 2 cores
|
/kernel/linux/linux-5.10/arch/arm/boot/dts/ |
D | vexpress-v2p-ca15-tc1.dts |
    199 volt-cores {
    210 amp-cores {
    211 /* Total current for the two cores */
    224 power-cores {
|
D | exynos5422-odroidxu3-lite.dts | 35 * than Odroid XU3/XU4 boards: 1.8 GHz for A15 cores & 1.3 GHz for A7 cores.
|
/kernel/linux/linux-5.10/Documentation/locking/ |
D | percpu-rw-semaphore.rst |
    9  cores take the lock for reading, the cache line containing the semaphore
    10 is bouncing between L1 caches of the cores, causing performance
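
A short usage sketch may help here: the point of percpu_rw_semaphore is that readers only touch per-CPU state, so percpu_down_read()/percpu_up_read() avoid the cache-line bouncing described above, while percpu_down_write() pays the cost of synchronizing with all readers. Assuming the standard v5.10 API and an illustrative semaphore name::

    #include <linux/percpu-rwsem.h>

    /* 'my_sem' and the two paths below are illustrative names only. */
    static struct percpu_rw_semaphore my_sem;

    static int my_init(void)
    {
        return percpu_init_rwsem(&my_sem);
    }

    static void reader_path(void)
    {
        percpu_down_read(&my_sem);    /* cheap: per-CPU bookkeeping only */
        /* ... read shared state ... */
        percpu_up_read(&my_sem);
    }

    static void writer_path(void)
    {
        percpu_down_write(&my_sem);   /* expensive: waits for all readers */
        /* ... modify shared state ... */
        percpu_up_write(&my_sem);
    }
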
|
/kernel/linux/linux-5.10/Documentation/admin-guide/perf/ |
D | arm_dsu_pmu.rst |
    5  ARM DynamIQ Shared Unit integrates one or more cores with an L3 memory system,
    11 cores connected to the same DSU. Like most of the other uncore PMUs, DSU
|
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/ |
D | xilinx.txt |
    1  d) Xilinx IP cores
    3  The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
    14 device drivers how the IP cores are configured, but it requires the kernel
    20 properties of the device node. In general, device nodes for IP-cores
    89 That covers the general approach to binding xilinx IP cores into the
|
/kernel/liteos_a/kernel/ |
D | Kconfig |
    13 This represents the number of multi-processing cores.
    27 This option enables synchronized task operations across cores.
|
/kernel/linux/linux-5.10/Documentation/x86/ |
D | topology.rst |
    24 threads, cores, packages, etc.
    36 - cores
    41 Packages contain a number of cores plus shared resources, e.g. DRAM
    52 The number of cores in a package. This information is retrieved via CPUID.
    65 and deduced from the APIC IDs of the cores in the package.
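
Kernel code normally consumes this package/core/thread information through the generic topology accessors rather than raw CPUID or APIC IDs. A small sketch using topology_physical_package_id() and topology_core_id(), which exist in v5.10 (the helper name dump_cpu_topology() is illustrative)::

    #include <linux/cpumask.h>
    #include <linux/topology.h>
    #include <linux/printk.h>

    /* Print the package and core each online CPU (hardware thread)
     * belongs to, via the generic topology accessors.
     */
    static void dump_cpu_topology(void)
    {
        unsigned int cpu;

        for_each_online_cpu(cpu)
            pr_info("cpu%u: package %d, core %d\n", cpu,
                    topology_physical_package_id(cpu),
                    topology_core_id(cpu));
    }
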
|
/kernel/linux/linux-5.10/tools/power/cpupower/lib/ |
D | cpupower.h | 7 unsigned int cores; member
|