/drivers/lguest/ |
D | hypercalls.c |
    37   static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)   in do_hcall()   argument
    57   kill_guest(cpu, "already have lguest_data");   in do_hcall()
    68   __lgread(cpu, msg, args->arg1, sizeof(msg));   in do_hcall()
    70   kill_guest(cpu, "CRASH: %s", msg);   in do_hcall()
    72   cpu->lg->dead = ERR_PTR(-ERESTART);   in do_hcall()
    78   guest_pagetable_clear_all(cpu);   in do_hcall()
    80   guest_pagetable_flush_user(cpu);   in do_hcall()
    88   guest_new_pagetable(cpu, args->arg1);   in do_hcall()
    91   guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);   in do_hcall()
    95   guest_set_pte(cpu, args->arg1, args->arg2,   in do_hcall()
    [all …]
|
D | interrupts_and_traps.c |
    51   static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)   in push_guest_stack()   argument
    55   lgwrite(cpu, *gstack, u32, val);   in push_guest_stack()
    72   static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,   in set_guest_interrupt()   argument
    84   if ((cpu->regs->ss&0x3) != GUEST_PL) {   in set_guest_interrupt()
    89   virtstack = cpu->esp1;   in set_guest_interrupt()
    90   ss = cpu->ss1;   in set_guest_interrupt()
    92   origstack = gstack = guest_pa(cpu, virtstack);   in set_guest_interrupt()
    99   push_guest_stack(cpu, &gstack, cpu->regs->ss);   in set_guest_interrupt()
    100  push_guest_stack(cpu, &gstack, cpu->regs->esp);   in set_guest_interrupt()
    103  virtstack = cpu->regs->esp;   in set_guest_interrupt()
    [all …]
|
D | page_tables.c |
    83   static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)   in spgd_addr()   argument
    88   return &cpu->lg->pgdirs[i].pgdir[index];   in spgd_addr()
    97   static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)   in spmd_addr()   argument
    115  static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)   in spte_addr()   argument
    118  pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);   in spte_addr()
    136  static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)   in gpgd_addr()   argument
    139  return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);   in gpgd_addr()
    152  static unsigned long gpte_addr(struct lg_cpu *cpu,   in gpte_addr()   argument
    162  static unsigned long gpte_addr(struct lg_cpu *cpu,   in gpte_addr()   argument
    206  static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)   in gpte_to_spte()   argument
    [all …]
|
D | lg.h |
    135  #define lgread(cpu, addr, type) \   argument
    136  ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
    139  #define lgwrite(cpu, addr, type, val) \   argument
    142  __lgwrite((cpu), (addr), &(val), sizeof(val)); \
    146  int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
    159  unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
    160  void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
    161  void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
    162  bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
    163  void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
    [all …]
|
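The lgread()/lgwrite() macros quoted from lg.h above hinge on a GCC statement-expression: the macro declares a temporary of the caller-supplied type, fills it through __lgread(), and yields it as the value of the whole expression. Below is a minimal user-space sketch of that pattern; the flat guest_mem buffer and the simplified __lgread() (which drops the struct lg_cpu argument) are illustrative stand-ins, not the lguest host API.

```c
#include <stdio.h>
#include <string.h>

/* Stand-in "guest memory": a flat buffer indexed by guest address. */
static unsigned char guest_mem[4096];

/* Illustrative analogue of __lgread(): copy `bytes` out of guest_mem. */
static void __lgread(void *b, unsigned long addr, unsigned bytes)
{
	memcpy(b, guest_mem + addr, bytes);
}

/*
 * Same shape as the kernel macro: a GCC statement-expression declares a
 * temporary of the requested type, fills it, and yields it as the value
 * of the expression, so callers get a typed read in one call.
 */
#define lgread(addr, type) \
	({ type _v; __lgread(&_v, (addr), sizeof(_v)); _v; })

int main(void)
{
	unsigned int val = 0xdeadbeef;

	memcpy(guest_mem + 64, &val, sizeof(val));
	printf("read back: %#x\n", lgread(64, unsigned int));
	return 0;
}
```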
D | segments.c |
    67   static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)   in fixup_gdt_table()   argument
    84   if (cpu->arch.gdt[i].dpl == 0)   in fixup_gdt_table()
    85   cpu->arch.gdt[i].dpl |= GUEST_PL;   in fixup_gdt_table()
    93   cpu->arch.gdt[i].type |= 0x1;   in fixup_gdt_table()
    136  void setup_guest_gdt(struct lg_cpu *cpu)   in setup_guest_gdt()   argument
    142  cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;   in setup_guest_gdt()
    143  cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;   in setup_guest_gdt()
    144  cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;   in setup_guest_gdt()
    145  cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;   in setup_guest_gdt()
    152  void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)   in copy_gdt_tls()   argument
    [all …]
|
D | lguest_user.c |
    27   bool send_notify_to_eventfd(struct lg_cpu *cpu)   in send_notify_to_eventfd()   argument
    49   map = rcu_dereference(cpu->lg->eventfds);   in send_notify_to_eventfd()
    55   if (map->map[i].addr == cpu->pending_notify) {   in send_notify_to_eventfd()
    57   cpu->pending_notify = 0;   in send_notify_to_eventfd()
    65   return cpu->pending_notify == 0;   in send_notify_to_eventfd()
    180  static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)   in user_send_irq()   argument
    193  set_interrupt(cpu, irq);   in user_send_irq()
    204  struct lg_cpu *cpu;   in read()   local
    215  cpu = &lg->cpus[cpu_id];   in read()
    218  if (current != cpu->tsk)   in read()
    [all …]
|
D | core.c |
    187  void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)   in __lgread()   argument
    189  if (!lguest_address_ok(cpu->lg, addr, bytes)   in __lgread()
    190  || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {   in __lgread()
    193  kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);   in __lgread()
    198  void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,   in __lgwrite()   argument
    201  if (!lguest_address_ok(cpu->lg, addr, bytes)   in __lgwrite()
    202  || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)   in __lgwrite()
    203  kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);   in __lgwrite()
    212  int run_guest(struct lg_cpu *cpu, unsigned long __user *user)   in run_guest()   argument
    215  while (!cpu->lg->dead) {   in run_guest()
    [all …]
|
/drivers/lguest/x86/ |
D | core.c |
    66   static struct lguest_pages *lguest_pages(unsigned int cpu)   in lguest_pages()   argument
    68   return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);   in lguest_pages()
    84   static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)   in copy_in_guest_info()   argument
    92   if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {   in copy_in_guest_info()
    93   __this_cpu_write(lg_last_cpu, cpu);   in copy_in_guest_info()
    94   cpu->last_pages = pages;   in copy_in_guest_info()
    95   cpu->changed = CHANGED_ALL;   in copy_in_guest_info()
    107  map_switcher_in_guest(cpu, pages);   in copy_in_guest_info()
    113  pages->state.guest_tss.sp1 = cpu->esp1;   in copy_in_guest_info()
    114  pages->state.guest_tss.ss1 = cpu->ss1;   in copy_in_guest_info()
    [all …]
|
/drivers/cpufreq/ |
D | intel_pstate.c |
    75   int cpu;   member
    187  static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)   in intel_pstate_busy_pid_reset()   argument
    189  pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);   in intel_pstate_busy_pid_reset()
    190  pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);   in intel_pstate_busy_pid_reset()
    191  pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);   in intel_pstate_busy_pid_reset()
    193  pid_reset(&cpu->pid,   in intel_pstate_busy_pid_reset()
    194  cpu->pstate_policy->setpoint,   in intel_pstate_busy_pid_reset()
    196  cpu->pstate_policy->deadband,   in intel_pstate_busy_pid_reset()
    202  unsigned int cpu;   in intel_pstate_reset_all_pid()   local
    203  for_each_online_cpu(cpu) {   in intel_pstate_reset_all_pid()
    [all …]
|
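The intel_pstate.c lines above configure a PID controller per CPU: proportional, integral, and derivative gains plus a setpoint and deadband taken from the policy. The sketch below shows the general shape of one such control step in plain floating-point C; the struct, function names, and arithmetic are simplified illustrations, not the driver's fixed-point implementation.

```c
#include <stdio.h>

/* Simplified stand-in for the driver's per-CPU PID state. */
struct pid_ctl {
	double p_gain, i_gain, d_gain;
	double setpoint, deadband;
	double integral, last_err;
};

static void pid_reset(struct pid_ctl *pid, double setpoint, double deadband)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = 0.0;
	pid->last_err = 0.0;
}

/* One control step: error inside the deadband is treated as zero. */
static double pid_calc(struct pid_ctl *pid, double busy)
{
	double err = pid->setpoint - busy;
	double out;

	if (err > -pid->deadband && err < pid->deadband)
		err = 0.0;

	pid->integral += err;
	out = pid->p_gain * err + pid->i_gain * pid->integral +
	      pid->d_gain * (err - pid->last_err);
	pid->last_err = err;
	return out;
}

int main(void)
{
	struct pid_ctl pid = { .p_gain = 20.0, .i_gain = 0.0, .d_gain = 0.0 };

	pid_reset(&pid, 97.0, 0.0);
	printf("adjustment at 80%% busy: %.1f\n", pid_calc(&pid, 80.0));
	return 0;
}
```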
D | cpufreq_userspace.c |
    49   if (!per_cpu(cpu_is_managed, freq->cpu))   in userspace_cpufreq_notifier()
    54   freq->cpu, freq->new);   in userspace_cpufreq_notifier()
    55   per_cpu(cpu_cur_freq, freq->cpu) = freq->new;   in userspace_cpufreq_notifier()
    77   pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);   in cpufreq_set()
    80   if (!per_cpu(cpu_is_managed, policy->cpu))   in cpufreq_set()
    83   per_cpu(cpu_set_freq, policy->cpu) = freq;   in cpufreq_set()
    85   if (freq < per_cpu(cpu_min_freq, policy->cpu))   in cpufreq_set()
    86   freq = per_cpu(cpu_min_freq, policy->cpu);   in cpufreq_set()
    87   if (freq > per_cpu(cpu_max_freq, policy->cpu))   in cpufreq_set()
    88   freq = per_cpu(cpu_max_freq, policy->cpu);   in cpufreq_set()
    [all …]
|
D | cpufreq.c |
    70   #define lock_policy_rwsem(mode, cpu) \   argument
    71   static int lock_policy_rwsem_##mode(int cpu) \
    73   int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
    80   lock_policy_rwsem(read, cpu);
    81   lock_policy_rwsem(write, cpu);
    83   #define unlock_policy_rwsem(mode, cpu) \   argument
    84   static void unlock_policy_rwsem_##mode(int cpu) \
    86   int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
    91   unlock_policy_rwsem(read, cpu);
    92   unlock_policy_rwsem(write, cpu);
    [all …]
|
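The cpufreq.c excerpt above stamps out per-mode helpers with token pasting: lock_policy_rwsem(read, cpu) and lock_policy_rwsem(write, cpu) expand into lock_policy_rwsem_read() and lock_policy_rwsem_write(), each locking the policy's semaphore in the matching mode. Here is a self-contained user-space sketch of the same generation trick using a pthread read/write lock; the mode names are switched to rdlock/wrlock purely so they paste onto the pthread API, and the single global lock stands in for the per-policy semaphore.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-in for the per-policy semaphore the kernel looks up per CPU. */
static pthread_rwlock_t policy_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Same generation trick as lock_policy_rwsem(mode, cpu): paste the mode
 * into both the function name and the lock call, so one macro expansion
 * produces lock_policy_rwsem_rdlock() and lock_policy_rwsem_wrlock().
 */
#define lock_policy_rwsem(mode)				\
static int lock_policy_rwsem_##mode(void)		\
{							\
	return pthread_rwlock_##mode(&policy_rwsem);	\
}

lock_policy_rwsem(rdlock)
lock_policy_rwsem(wrlock)

int main(void)
{
	lock_policy_rwsem_rdlock();		/* generated reader lock */
	pthread_rwlock_unlock(&policy_rwsem);

	lock_policy_rwsem_wrlock();		/* generated writer lock */
	pthread_rwlock_unlock(&policy_rwsem);

	printf("both generated lock helpers ran\n");
	return 0;
}
```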
D | cpufreq_stats.c |
    32   unsigned int cpu;   member
    73   static int cpufreq_stats_update(unsigned int cpu)   in cpufreq_stats_update()   argument
    81   stat = per_cpu(cpufreq_stats_table, cpu);   in cpufreq_stats_update()
    82   all_stat = per_cpu(all_cpufreq_stats, cpu);   in cpufreq_stats_update()
    101  struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);   in show_total_trans()
    105  per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);   in show_total_trans()
    112  struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);   in show_time_in_state()
    115  cpufreq_stats_update(stat->cpu);   in show_time_in_state()
    160  unsigned int i, cpu;   in show_current_in_state()   local
    164  for_each_possible_cpu(cpu) {   in show_current_in_state()
    [all …]
|
D | speedstep-centrino.c |
    234  struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);   in centrino_cpu_init_table()   local
    238  if (centrino_verify_cpu_id(cpu, model->cpu_id) &&   in centrino_cpu_init_table()
    240  strcmp(cpu->x86_model_id, model->model_name) == 0))   in centrino_cpu_init_table()
    247  cpu->x86_model_id);   in centrino_cpu_init_table()
    254  cpu->x86_model_id);   in centrino_cpu_init_table()
    259  per_cpu(centrino_model, policy->cpu) = model;   in centrino_cpu_init_table()
    285  static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)   in extract_clock()   argument
    294  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||   in extract_clock()
    295  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||   in extract_clock()
    296  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {   in extract_clock()
    [all …]
|
D | ppc_cbe_cpufreq.c |
    53   static int set_pmode(unsigned int cpu, unsigned int slow_mode)   in set_pmode()   argument
    58   rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);   in set_pmode()
    60   rc = cbe_cpufreq_set_pmode(cpu, slow_mode);   in set_pmode()
    62   pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));   in set_pmode()
    76   struct device_node *cpu;   in cbe_cpufreq_cpu_init()   local
    78   cpu = of_get_cpu_node(policy->cpu, NULL);   in cbe_cpufreq_cpu_init()
    80   if (!cpu)   in cbe_cpufreq_cpu_init()
    83   pr_debug("init cpufreq on CPU %d\n", policy->cpu);   in cbe_cpufreq_cpu_init()
    88   if (!cbe_get_cpu_pmd_regs(policy->cpu) ||   in cbe_cpufreq_cpu_init()
    89   !cbe_get_cpu_mic_tm_regs(policy->cpu)) {   in cbe_cpufreq_cpu_init()
    [all …]
|
D | sh-cpufreq.c |
    33   static unsigned int sh_cpufreq_get(unsigned int cpu)   in sh_cpufreq_get()   argument
    35   return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;   in sh_cpufreq_get()
    45   unsigned int cpu = policy->cpu;   in sh_cpufreq_target()   local
    46   struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);   in sh_cpufreq_target()
    53   set_cpus_allowed_ptr(current, cpumask_of(cpu));   in sh_cpufreq_target()
    55   BUG_ON(smp_processor_id() != cpu);   in sh_cpufreq_target()
    57   dev = get_cpu_device(cpu);   in sh_cpufreq_target()
    67   freqs.old = sh_cpufreq_get(cpu);   in sh_cpufreq_target()
    83   struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);   in sh_cpufreq_verify()
    104  unsigned int cpu = policy->cpu;   in sh_cpufreq_cpu_init()   local
    [all …]
|
/drivers/cpuidle/ |
D | driver.c |
    21   static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
    22   static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
    26   int cpu = smp_processor_id();   in cpuidle_setup_broadcast_timer()   local
    27   clockevents_notify((long)(arg), &cpu);   in cpuidle_setup_broadcast_timer()
    30   static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)   in __cpuidle_driver_init()   argument
    42   on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,   in __cpuidle_driver_init()
    48   static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)   in __cpuidle_register_driver()   argument
    56   if (__cpuidle_get_cpu_driver(cpu))   in __cpuidle_register_driver()
    59   __cpuidle_driver_init(drv, cpu);   in __cpuidle_register_driver()
    61   __cpuidle_set_cpu_driver(drv, cpu);   in __cpuidle_register_driver()
    [all …]
|
D | coupled.c |
    296  int cpu = (unsigned long)info;   in cpuidle_coupled_poked()   local
    297  cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);   in cpuidle_coupled_poked()
    312  static void cpuidle_coupled_poke(int cpu)   in cpuidle_coupled_poke()   argument
    314  struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);   in cpuidle_coupled_poke()
    316  if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))   in cpuidle_coupled_poke()
    317  __smp_call_function_single(cpu, csd, 0);   in cpuidle_coupled_poke()
    330  int cpu;   in cpuidle_coupled_poke_others()   local
    332  for_each_cpu_mask(cpu, coupled->coupled_cpus)   in cpuidle_coupled_poke_others()
    333  if (cpu != this_cpu && cpu_online(cpu))   in cpuidle_coupled_poke_others()
    334  cpuidle_coupled_poke(cpu);   in cpuidle_coupled_poke_others()
    [all …]
|
/drivers/xen/ |
D | cpu_hotplug.c |
    9    static void enable_hotplug_cpu(int cpu)   in enable_hotplug_cpu()   argument
    11   if (!cpu_present(cpu))   in enable_hotplug_cpu()
    12   arch_register_cpu(cpu);   in enable_hotplug_cpu()
    14   set_cpu_present(cpu, true);   in enable_hotplug_cpu()
    17   static void disable_hotplug_cpu(int cpu)   in disable_hotplug_cpu()   argument
    19   if (cpu_present(cpu))   in disable_hotplug_cpu()
    20   arch_unregister_cpu(cpu);   in disable_hotplug_cpu()
    22   set_cpu_present(cpu, false);   in disable_hotplug_cpu()
    25   static int vcpu_online(unsigned int cpu)   in vcpu_online()   argument
    30   sprintf(dir, "cpu/%u", cpu);   in vcpu_online()
    [all …]
|
/drivers/base/ |
D | cpu.c |
    29   static void change_cpu_under_node(struct cpu *cpu,   in change_cpu_under_node()   argument
    32   int cpuid = cpu->dev.id;   in change_cpu_under_node()
    35   cpu->node_id = to_nid;   in change_cpu_under_node()
    42   struct cpu *cpu = container_of(dev, struct cpu, dev);   in show_online()   local
    44   return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));   in show_online()
    51   struct cpu *cpu = container_of(dev, struct cpu, dev);   in store_online()   local
    52   int cpuid = cpu->dev.id;   in store_online()
    73   change_cpu_under_node(cpu, from_nid, to_nid);   in store_online()
    89   static void __cpuinit register_cpu_control(struct cpu *cpu)   in register_cpu_control()   argument
    91   device_create_file(&cpu->dev, &dev_attr_online);   in register_cpu_control()
    [all …]
|
/drivers/oprofile/ |
D | nmi_timer_int.c |
    36   static int nmi_timer_start_cpu(int cpu)   in nmi_timer_start_cpu()   argument
    38   struct perf_event *event = per_cpu(nmi_timer_events, cpu);   in nmi_timer_start_cpu()
    41   event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,   in nmi_timer_start_cpu()
    45   per_cpu(nmi_timer_events, cpu) = event;   in nmi_timer_start_cpu()
    54   static void nmi_timer_stop_cpu(int cpu)   in nmi_timer_stop_cpu()   argument
    56   struct perf_event *event = per_cpu(nmi_timer_events, cpu);   in nmi_timer_stop_cpu()
    65   int cpu = (unsigned long)data;   in nmi_timer_cpu_notifier()   local
    69   nmi_timer_start_cpu(cpu);   in nmi_timer_cpu_notifier()
    72   nmi_timer_stop_cpu(cpu);   in nmi_timer_cpu_notifier()
    84   int cpu;   in nmi_timer_start()   local
    [all …]
|
D | oprofile_perf.c |
    39   u32 cpu = smp_processor_id();   in op_overflow_handler()   local
    42   if (per_cpu(perf_events, cpu)[id] == event)   in op_overflow_handler()
    49   "on cpu %u\n", cpu);   in op_overflow_handler()
    74   static int op_create_counter(int cpu, int event)   in op_create_counter()   argument
    78   if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])   in op_create_counter()
    82   cpu, NULL,   in op_create_counter()
    91   "on CPU %d\n", event, cpu);   in op_create_counter()
    95   per_cpu(perf_events, cpu)[event] = pevent;   in op_create_counter()
    100  static void op_destroy_counter(int cpu, int event)   in op_destroy_counter()   argument
    102  struct perf_event *pevent = per_cpu(perf_events, cpu)[event];   in op_destroy_counter()
    [all …]
|
/drivers/hwmon/ |
D | coretemp.c |
    60   #define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id)   argument
    61   #define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)   argument
    62   #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)   argument
    65   #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))   argument
    67   #define for_each_sibling(i, cpu) for (i = 0; false; )   argument
    88   unsigned int cpu;   member
    143  rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);   in show_crit_alarm()
    178  rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);   in show_temp()
    404  static int __cpuinit chk_ucode_version(unsigned int cpu)   in chk_ucode_version()   argument
    406  struct cpuinfo_x86 *c = &cpu_data(cpu);   in chk_ucode_version()
    [all …]
|
/drivers/acpi/ |
D | processor_thermal.c |
    60   #define reduction_pctg(cpu) \   argument
    61   per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
    70   static int phys_package_first_cpu(int cpu)   in phys_package_first_cpu()   argument
    73   int id = topology_physical_package_id(cpu);   in phys_package_first_cpu()
    81   static int cpu_has_cpufreq(unsigned int cpu)   in cpu_has_cpufreq()   argument
    84   if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))   in cpu_has_cpufreq()
    100  (100 - reduction_pctg(policy->cpu) * 20)   in acpi_thermal_cpufreq_notifier()
    113  static int cpufreq_get_max_state(unsigned int cpu)   in cpufreq_get_max_state()   argument
    115  if (!cpu_has_cpufreq(cpu))   in cpufreq_get_max_state()
    121  static int cpufreq_get_cur_state(unsigned int cpu)   in cpufreq_get_cur_state()   argument
    [all …]
|
/drivers/macintosh/ |
D | windfarm_pm72.c |
    209  static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)   in read_one_cpu_vals()   argument
    215  rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);   in read_one_cpu_vals()
    217  DBG(" CPU%d: temp reading error !\n", cpu);   in read_one_cpu_vals()
    220  DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));   in read_one_cpu_vals()
    224  rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);   in read_one_cpu_vals()
    226  DBG(" CPU%d, volts reading error !\n", cpu);   in read_one_cpu_vals()
    229  DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts)));   in read_one_cpu_vals()
    232  rc = wf_sensor_get(sens_cpu_amps[cpu], &amps);   in read_one_cpu_vals()
    234  DBG(" CPU%d, current reading error !\n", cpu);   in read_one_cpu_vals()
    237  DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps)));   in read_one_cpu_vals()
    [all …]
|
/drivers/clk/mvebu/ |
D | clk-cpu.c |
    27   int cpu;   member
    46   div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;   in clk_cpu_recalc_rate()
    74   & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))   in clk_cpu_set_rate()
    75   | (div << (cpuclk->cpu * 8));   in clk_cpu_set_rate()
    78   reload_mask = 1 << (20 + cpuclk->cpu);   in clk_cpu_set_rate()
    133  int cpu, err;   in of_cpu_clk_setup()   local
    138  err = of_property_read_u32(dn, "reg", &cpu);   in of_cpu_clk_setup()
    142  sprintf(clk_name, "cpu%d", cpu);   in of_cpu_clk_setup()
    145  cpuclk[cpu].parent_name = __clk_get_name(parent_clk);   in of_cpu_clk_setup()
    146  cpuclk[cpu].clk_name = clk_name;   in of_cpu_clk_setup()
    [all …]
|
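In the clk-cpu.c excerpt above, each CPU owns an 8-bit slot in a single clock-ratio register: clk_cpu_recalc_rate() shifts by cpu * 8 and masks with SYS_CTRL_CLK_DIVIDER_MASK, and clk_cpu_set_rate() does a read-modify-write of the same slot before touching the per-CPU reload bit (20 + cpu). A small user-space sketch of just that bit manipulation follows; the 0x3f mask value and the get_div()/set_div() helper names are assumptions for illustration, not the driver's API.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: each CPU gets an 8-bit slot, divider in the low 6 bits. */
#define SYS_CTRL_CLK_DIVIDER_MASK	0x3f

/* Extract one CPU's divider, mirroring the shift-and-mask in clk_cpu_recalc_rate(). */
static uint32_t get_div(uint32_t reg, int cpu)
{
	return (reg >> (cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
}

/* Read-modify-write of one CPU's slot, mirroring clk_cpu_set_rate(). */
static uint32_t set_div(uint32_t reg, int cpu, uint32_t div)
{
	reg &= ~((uint32_t)SYS_CTRL_CLK_DIVIDER_MASK << (cpu * 8));
	reg |= (div & SYS_CTRL_CLK_DIVIDER_MASK) << (cpu * 8);
	return reg;
}

int main(void)
{
	uint32_t reg = 0x01020304;	/* dividers 4, 3, 2, 1 for CPUs 0..3 */
	int cpu;

	reg = set_div(reg, 2, 8);	/* update only CPU 2's slot */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d divider = %u\n", cpu, get_div(reg, cpu));
	return 0;
}
```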