
Searched full:vpe (Results 1 – 25 of 115) sorted by relevance


/kernel/linux/linux-4.19/arch/mips/kernel/
vpe-mt.c
18 #include <asm/vpe.h>
25 /* We are prepared so configure and start the VPE... */
26 int vpe_run(struct vpe *v) in vpe_run()
33 /* check we are the Master VPE */ in vpe_run()
37 pr_warn("VPE loader: only Master VPE's are able to config MT\n"); in vpe_run()
51 pr_warn("VPE loader: No TC's associated with VPE %d\n", in vpe_run()
71 pr_warn("VPE loader: TC %d is already active!\n", in vpe_run()
104 * bind the TC to VPE 1 as late as possible so we only have the final in vpe_run()
105 * VPE registers to set up, and so an EJTAG probe can trigger on it in vpe_run()
119 /* enable this VPE */ in vpe_run()
[all …]
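
Taken together, the vpe-mt.c hits trace the control flow of vpe_run(): refuse to run unless called on the master VPE, require a TC that is associated with the target VPE and not already active, bind that TC to VPE 1 as late as possible, and finally enable the VPE. The self-contained C sketch below mirrors that order of checks; the struct fields and helper names are simplified stand-ins, not the kernel's own API.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct vpe / struct tc. */
struct tc_sketch  { bool active; };
struct vpe_sketch { struct tc_sketch *tc; };

static bool running_on_master_vpe = true;  /* assumption: we execute on VPE 0 */

/* Mirrors the order of checks the matched vpe_run() comments describe. */
static int vpe_run_sketch(struct vpe_sketch *v)
{
    if (!running_on_master_vpe) {
        fprintf(stderr, "only the master VPE may configure MT\n");
        return -1;
    }
    if (!v->tc) {
        fprintf(stderr, "no TC associated with this VPE\n");
        return -1;
    }
    if (v->tc->active) {
        fprintf(stderr, "TC is already active\n");
        return -1;
    }
    v->tc->active = true;  /* bind the TC to VPE 1 as late as possible, then start it */
    return 0;
}

int main(void)
{
    struct tc_sketch tc = { .active = false };
    struct vpe_sketch v = { .tc = &tc };
    return vpe_run_sketch(&v) ? 1 : 0;
}
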
vpe-cmp.c
15 #include <asm/vpe.h>
27 struct vpe *vpe = get_vpe(aprp_cpu_index()); in store_kill() local
30 list_for_each_entry(notifier, &vpe->notify, list) in store_kill()
33 release_progmem(vpe->load_addr); in store_kill()
34 vpe->state = VPE_STATE_UNUSED; in store_kill()
43 struct vpe *vpe = get_vpe(aprp_cpu_index()); in ntcs_show() local
45 return sprintf(buf, "%d\n", vpe->ntcs); in ntcs_show()
51 struct vpe *vpe = get_vpe(aprp_cpu_index()); in ntcs_store() local
59 /* APRP can only reserve one TC in a VPE and no more. */ in ntcs_store()
63 vpe->ntcs = new; in ntcs_store()
[all …]
vpe.c
9 * VPE support module for loading a MIPS SP program into VPE1. The SP
36 #include <asm/vpe.h>
52 /* get the vpe associated with this minor */
53 struct vpe *get_vpe(int minor) in get_vpe()
55 struct vpe *res, *v; in get_vpe()
73 /* get the vpe associated with this minor */
91 /* allocate a vpe and associate it with this minor (or index) */
92 struct vpe *alloc_vpe(int minor) in alloc_vpe()
94 struct vpe *v; in alloc_vpe()
96 v = kzalloc(sizeof(struct vpe), GFP_KERNEL); in alloc_vpe()
[all …]
cps-vec.S
176 * deactivate this VPE if it should be offline.
241 /* Only allow 1 TC per VPE to execute... */
244 /* ...and for the moment only 1 VPE */
250 /* Enter VPE configuration state */
265 /* Loop through each VPE within this core */
272 /* Bind TC to VPE (1:1 TC:VPE mapping) */
288 /* Next VPE */
294 /* Leave VPE configuration state */
309 * struct vpe_boot_config in v1, VPE ID in t9
342 /* Calculate a mask for the VPE ID from EBase.CPUNum */
[all …]
smp-mt.c
63 /* Deactivate all but VPE 0 */ in smvp_vpe_init()
69 /* master VPE */ in smvp_vpe_init()
98 /* bind a TC to each VPE, May as well put all excess TC's in smvp_tc_init()
99 on the last VPE */ in smvp_tc_init()
151 * assumes a 1:1 mapping of TC => VPE
164 /* enable the tc this vpe/cpu will be running */ in vsmp_boot_secondary()
169 /* enable the VPE */ in vsmp_boot_secondary()
220 /* we'll always have more TC's than VPE's, so loop setting everything in vsmp_smp_setup()
smp-cps.c
56 /* Detect & record VPE topology */ in cps_smp_setup()
59 pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); in cps_smp_setup()
90 /* Indicate present CPUs (CPU being synonymous with VPE) */ in cps_smp_setup()
183 /* Allocate VPE boot configuration structs */ in cps_prepare_cpus()
190 pr_err("Failed to allocate %u VPE boot configs\n", in cps_prepare_cpus()
321 /* Boot a VPE on a powered down core */ in cps_boot_secondary()
334 /* Boot a VPE on another powered up core */ in cps_boot_secondary()
356 /* Boot a VPE on this core */ in cps_boot_secondary()
365 /* Disable MT - we only want to run 1 TC per VPE */ in cps_init_secondary()
442 /* Look for another online VPE within the core */ in play_dead()
[all …]
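
The smp-cps.c hits also show the three bring-up paths cps_boot_secondary() chooses between: power the target core up if it is down, ask an already-online VPE on that core to start the new one, or start it directly when it belongs to the current core. A small stand-alone model of that decision, with illustrative names only:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative model of the three cases the cps_boot_secondary() hits describe. */
enum boot_path { BOOT_POWER_UP_CORE, BOOT_VIA_REMOTE_VPE, BOOT_LOCAL_VPE };

static enum boot_path pick_boot_path(bool core_powered, bool target_on_this_core)
{
    if (!core_powered)
        return BOOT_POWER_UP_CORE;   /* boot a VPE on a powered-down core */
    if (!target_on_this_core)
        return BOOT_VIA_REMOTE_VPE;  /* boot a VPE on another powered-up core */
    return BOOT_LOCAL_VPE;           /* boot a VPE on this core */
}

int main(void)
{
    printf("%d %d %d\n",
           pick_boot_path(false, false),  /* power the core up first */
           pick_boot_path(true,  false),  /* poke a VPE on the other core */
           pick_boot_path(true,  true));  /* start it on this core */
    return 0;
}
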
/kernel/linux/linux-5.10/arch/mips/kernel/
vpe-mt.c
18 #include <asm/vpe.h>
25 /* We are prepared so configure and start the VPE... */
26 int vpe_run(struct vpe *v) in vpe_run()
33 /* check we are the Master VPE */ in vpe_run()
37 pr_warn("VPE loader: only Master VPE's are able to config MT\n"); in vpe_run()
51 pr_warn("VPE loader: No TC's associated with VPE %d\n", in vpe_run()
71 pr_warn("VPE loader: TC %d is already active!\n", in vpe_run()
104 * bind the TC to VPE 1 as late as possible so we only have the final in vpe_run()
105 * VPE registers to set up, and so an EJTAG probe can trigger on it in vpe_run()
119 /* enable this VPE */ in vpe_run()
[all …]
vpe-cmp.c
15 #include <asm/vpe.h>
27 struct vpe *vpe = get_vpe(aprp_cpu_index()); in store_kill() local
30 list_for_each_entry(notifier, &vpe->notify, list) in store_kill()
33 release_progmem(vpe->load_addr); in store_kill()
34 vpe->state = VPE_STATE_UNUSED; in store_kill()
43 struct vpe *vpe = get_vpe(aprp_cpu_index()); in ntcs_show() local
45 return sprintf(buf, "%d\n", vpe->ntcs); in ntcs_show()
51 struct vpe *vpe = get_vpe(aprp_cpu_index()); in ntcs_store() local
59 /* APRP can only reserve one TC in a VPE and no more. */ in ntcs_store()
63 vpe->ntcs = new; in ntcs_store()
[all …]
vpe.c
9 * VPE support module for loading a MIPS SP program into VPE1. The SP
36 #include <asm/vpe.h>
52 /* get the vpe associated with this minor */
53 struct vpe *get_vpe(int minor) in get_vpe()
55 struct vpe *res, *v; in get_vpe()
73 /* get the vpe associated with this minor */
91 /* allocate a vpe and associate it with this minor (or index) */
92 struct vpe *alloc_vpe(int minor) in alloc_vpe()
94 struct vpe *v; in alloc_vpe()
96 v = kzalloc(sizeof(struct vpe), GFP_KERNEL); in alloc_vpe()
[all …]
cps-vec.S
172 * deactivate this VPE if it should be offline.
237 /* Only allow 1 TC per VPE to execute... */
240 /* ...and for the moment only 1 VPE */
246 /* Enter VPE configuration state */
261 /* Loop through each VPE within this core */
268 /* Bind TC to VPE (1:1 TC:VPE mapping) */
284 /* Next VPE */
290 /* Leave VPE configuration state */
305 * struct vpe_boot_config in v1, VPE ID in t9
338 /* Calculate a mask for the VPE ID from EBase.CPUNum */
[all …]
smp-mt.c
52 /* Deactivate all but VPE 0 */ in smvp_vpe_init()
58 /* master VPE */ in smvp_vpe_init()
87 /* bind a TC to each VPE, May as well put all excess TC's in smvp_tc_init()
88 on the last VPE */ in smvp_tc_init()
140 * assumes a 1:1 mapping of TC => VPE
153 /* enable the tc this vpe/cpu will be running */ in vsmp_boot_secondary()
158 /* enable the VPE */ in vsmp_boot_secondary()
209 /* we'll always have more TC's than VPE's, so loop setting everything in vsmp_smp_setup()
mips-mt.c
71 printk("-- per-VPE State --\n"); in mips_mt_regdump()
76 printk(" VPE %d\n", i); in mips_mt_regdump()
81 printk(" VPE%d.Status : %08lx\n", in mips_mt_regdump()
83 printk(" VPE%d.EPC : %08lx %pS\n", in mips_mt_regdump()
86 printk(" VPE%d.Cause : %08lx\n", in mips_mt_regdump()
88 printk(" VPE%d.Config7 : %08lx\n", in mips_mt_regdump()
90 break; /* Next VPE */ in mips_mt_regdump()
101 printk(" TC %d (current TC with VPE EPC above)\n", tc); in mips_mt_regdump()
/kernel/linux/linux-5.10/arch/mips/lantiq/
irq.c
49 #define ltq_icu_w32(vpe, m, x, y) \ argument
50 ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
52 #define ltq_icu_r32(vpe, m, x) \ argument
53 ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
82 int vpe; in ltq_disable_irq() local
87 for_each_present_cpu(vpe) { in ltq_disable_irq()
88 ltq_icu_w32(vpe, im, in ltq_disable_irq()
89 ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset), in ltq_disable_irq()
100 int vpe; in ltq_mask_and_ack_irq() local
105 for_each_present_cpu(vpe) { in ltq_mask_and_ack_irq()
[all …]
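
The irq.c hits show the pattern the Lantiq ICU driver uses on multi-VPE systems: ltq_icu_w32()/ltq_icu_r32() select a per-VPE register window (ltq_icu_membase[vpe] plus a per-module offset), and ltq_disable_irq() walks every present VPE so the interrupt's IER bit is cleared in each copy. Below is a self-contained model of that read-modify-write loop, with the MMIO window replaced by plain arrays; the sizes and names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define NR_VPES    2   /* illustrative: two VPEs sharing one ICU */
#define NR_MODULES 5   /* illustrative: interrupt modules per ICU */

/* Stand-in for the per-VPE, per-module IER registers the macros address. */
static uint32_t ier[NR_VPES][NR_MODULES];

/* Models ltq_icu_w32()/ltq_icu_r32(): pick the window by VPE, then module. */
static void     icu_w32(int vpe, int m, uint32_t val) { ier[vpe][m] = val; }
static uint32_t icu_r32(int vpe, int m)                { return ier[vpe][m]; }

/* Models ltq_disable_irq(): clear the interrupt's bit on every present VPE. */
static void disable_irq_bit(int m, int offset)
{
    for (int vpe = 0; vpe < NR_VPES; vpe++)
        icu_w32(vpe, m, icu_r32(vpe, m) & ~(1u << offset));
}

int main(void)
{
    icu_w32(0, 1, 0xffffffff);
    icu_w32(1, 1, 0xffffffff);
    disable_irq_bit(1, 3);
    printf("%08x %08x\n", (unsigned)icu_r32(0, 1), (unsigned)icu_r32(1, 1)); /* fffffff7 fffffff7 */
    return 0;
}
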
/kernel/linux/linux-5.10/drivers/irqchip/
irq-gic-v4.c
54 * with the actual vPE affinity, and not the braindead concept of
64 * The choice made here is that each vcpu (VPE in old northern GICv4
67 * interrupt becomes a handle for the VPE, and that the hypervisor
71 * contain an irq domain where each interrupt maps to a VPE. In
77 * - irq_set_affinity is used to move a VPE from one redistributor to
81 * creating a new sub-API, namely scheduling/descheduling a VPE
95 static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) in its_alloc_vcpu_sgis() argument
107 vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx); in its_alloc_vcpu_sgis()
108 if (!vpe->fwnode) in its_alloc_vcpu_sgis()
114 vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, in its_alloc_vcpu_sgis()
[all …]
irq-gic-v3-its.c
259 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) in vpe_to_cpuid_lock() argument
261 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); in vpe_to_cpuid_lock()
262 return vpe->col_idx; in vpe_to_cpuid_lock()
265 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) in vpe_to_cpuid_unlock() argument
267 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in vpe_to_cpuid_unlock()
276 cpu = vpe_to_cpuid_lock(map->vpe, flags); in irq_to_cpuid_lock()
293 vpe_to_cpuid_unlock(map->vpe, flags); in irq_to_cpuid_unlock()
304 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
306 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
307 return vpe; in valid_vpe()
[all …]
/kernel/linux/linux-5.10/arch/mips/include/asm/
vpe.h
17 #define VPE_MODULE_NAME "vpe"
54 struct vpe { struct
57 /* (device) minor associated with this vpe */
69 /* tc's associated with this vpe */ argument
72 /* The list of vpe's */ argument
88 struct vpe *pvpe; /* parent VPE */ argument
89 struct list_head tc; /* The list of TC's with this VPE */
94 void (*start)(int vpe);
95 void (*stop)(int vpe);
116 struct vpe *get_vpe(int minor);
[all …]
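
The vpe.h hits sketch how the VPE loader tracks its devices: each struct vpe records the character-device minor it belongs to, the TCs bound to it and its parent VPE, and get_vpe()/alloc_vpe() translate a minor number into that structure. The following is a reduced, self-contained model of that minor-to-vpe lookup; the list handling is simplified and only the fields visible in the hits are kept.

#include <stdlib.h>
#include <stdio.h>

/* Reduced model of struct vpe: just the minor and a next pointer. */
struct vpe_sketch {
    int minor;                 /* (device) minor associated with this vpe */
    struct vpe_sketch *next;   /* stand-in for the kernel's list_head */
};

static struct vpe_sketch *vpe_list;

/* Models alloc_vpe(): allocate a vpe and associate it with this minor. */
static struct vpe_sketch *alloc_vpe_sketch(int minor)
{
    struct vpe_sketch *v = calloc(1, sizeof(*v));
    if (!v)
        return NULL;
    v->minor = minor;
    v->next = vpe_list;
    vpe_list = v;
    return v;
}

/* Models get_vpe(): walk the list looking for a matching minor. */
static struct vpe_sketch *get_vpe_sketch(int minor)
{
    for (struct vpe_sketch *v = vpe_list; v; v = v->next)
        if (v->minor == minor)
            return v;
    return NULL;
}

int main(void)
{
    alloc_vpe_sketch(1);
    printf("minor 1 -> vpe %s\n", get_vpe_sketch(1) ? "found" : "missing");
    return 0;
}
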
/kernel/linux/linux-4.19/arch/mips/include/asm/
vpe.h
17 #define VPE_MODULE_NAME "vpe"
54 struct vpe { struct
57 /* (device) minor associated with this vpe */
69 /* tc's associated with this vpe */ argument
72 /* The list of vpe's */ argument
88 struct vpe *pvpe; /* parent VPE */ argument
89 struct list_head tc; /* The list of TC's with this VPE */
94 void (*start)(int vpe);
95 void (*stop)(int vpe);
116 struct vpe *get_vpe(int minor);
[all …]
/kernel/linux/linux-5.10/include/linux/irqchip/
arm-gic-v4.h
35 /* per-vPE VLPI tracking */
40 /* VPE resident */
45 /* VPE proxy mapping */
65 * vPE and vLPI operations using vpe->col_idx.
70 * redistributor for this VPE. The ID itself isn't involved in
74 /* Unique (system-wide) VPE identifier */
87 * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
90 * @db_enabled: Is the VPE doorbell to be generated?
94 struct its_vpe *vpe; member
130 int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
[all …]
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/media/
ti,vpe.yaml
4 $id: http://devicetree.org/schemas/media/ti,vpe.yaml#
7 title: Texas Instruments DRA7x Video Processing Engine (VPE) Device Tree Bindings
13 The Video Processing Engine (VPE) is a key component for image post
14 processing applications. VPE consists of a single memory to memory
20 const: ti,dra7-vpe
24 - description: The VPE main register region
51 vpe: vpe@489d0000 {
52 compatible = "ti,dra7-vpe";
/kernel/linux/linux-4.19/include/linux/irqchip/
arm-gic-v4.h
49 /* VPE proxy mapping */
53 * redistributor for this VPE. The ID itself isn't involved in
57 /* Unique (system-wide) VPE identifier */
72 * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
75 * @db_enabled: Is the VPE doorbell to be generated?
79 struct its_vpe *vpe; member
105 int its_schedule_vpe(struct its_vpe *vpe, bool on);
106 int its_invall_vpe(struct its_vpe *vpe);
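
The 4.19 header above declares the two calls a hypervisor uses to drive a vPE: its_schedule_vpe() makes the vPE resident on (or removes it from) a redistributor, and its_invall_vpe() invalidates its cached vLPI configuration. The fragment below is a hedged illustration of how a vcpu load/put path might use them; it builds only on the declarations shown here and is not the kernel's actual vgic-v4 code.

/* Hedged illustration: calling its_schedule_vpe()/its_invall_vpe() from a
 * hypothetical vcpu load/put path. Error handling and the surrounding KVM
 * context are omitted. */
#include <linux/irqchip/arm-gic-v4.h>

static int sketch_vcpu_load(struct its_vpe *vpe)
{
    /* make the vPE resident so its vLPIs are delivered directly */
    return its_schedule_vpe(vpe, true);
}

static int sketch_vcpu_put(struct its_vpe *vpe)
{
    /* deschedule the vPE; pending vLPIs fall back to the doorbell */
    return its_schedule_vpe(vpe, false);
}

static int sketch_after_vlpi_reconfig(struct its_vpe *vpe)
{
    /* after changing vLPI properties, invalidate any cached copies */
    return its_invall_vpe(vpe);
}
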
/kernel/linux/linux-4.19/drivers/irqchip/
irq-gic-v3-its.c
217 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
219 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
220 return vpe; in valid_vpe()
278 struct its_vpe *vpe; member
282 struct its_vpe *vpe; member
288 struct its_vpe *vpe; member
296 struct its_vpe *vpe; member
303 struct its_vpe *vpe; member
593 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); in its_build_vinvall_cmd()
597 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
[all …]
irq-gic-v4.c
65 * with the actual vPE affinity, and not the braindead concept of
75 * The choice made here is that each vcpu (VPE in old northern GICv4
78 * interrupt becomes a handle for the VPE, and that the hypervisor
82 * contain an irq domain where each interrupt maps to a VPE. In
88 * - irq_set_affinity is used to move a VPE from one redistributor to
92 * creating a new sub-API, namely scheduling/descheduling a VPE
104 vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe", in its_alloc_vcpu_irqs()
147 static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) in its_send_vpe_cmd() argument
149 return irq_set_vcpu_affinity(vpe->irq, info); in its_send_vpe_cmd()
152 int its_schedule_vpe(struct its_vpe *vpe, bool on) in its_schedule_vpe() argument
[all …]
/kernel/linux/linux-5.10/arch/arm64/kvm/vgic/
vgic-v4.c
94 * The v4.1 doorbell can fire concurrently with the vPE being in vgic_v4_doorbell_handler()
108 static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq) in vgic_v4_sync_sgi_config() argument
110 vpe->sgi_config[irq->intid].enabled = irq->enabled; in vgic_v4_sync_sgi_config()
111 vpe->sgi_config[irq->intid].group = irq->group; in vgic_v4_sync_sgi_config()
112 vpe->sgi_config[irq->intid].priority = irq->priority; in vgic_v4_sync_sgi_config()
117 struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_enable_vsgis() local
137 irq->host_irq = irq_find_mapping(vpe->sgi_domain, i); in vgic_v4_enable_vsgis()
139 /* Transfer the full irq state to the vPE */ in vgic_v4_enable_vsgis()
140 vgic_v4_sync_sgi_config(vpe, irq); in vgic_v4_enable_vsgis()
241 kvm_err("VPE IRQ allocation failure\n"); in vgic_v4_init()
[all …]
/kernel/linux/linux-4.19/arch/mips/pmcs-msp71xx/
msp_irq_cic.c
82 int vpe; in unmask_cic_irq() local
96 vpe = get_current_vpe(); in unmask_cic_irq()
98 cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE)); in unmask_cic_irq()
106 int vpe = get_current_vpe(); in mask_cic_irq() local
111 cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE)); in mask_cic_irq()
142 /* enable if any of each VPE's TCs require this IRQ */ in msp_cic_irq_set_affinity()
/kernel/linux/linux-4.19/drivers/media/platform/ti-vpe/
Makefile
2 obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
7 ti-vpe-y := vpe.o
