Lines matching refs: svcpu
269 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_next_segment() local
276 for (i = 0; i < svcpu->slb_max; i++) { in kvmppc_mmu_next_segment()
277 if (!(svcpu->slb[i].esid & SLB_ESID_V)) in kvmppc_mmu_next_segment()
279 else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { in kvmppc_mmu_next_segment()
297 if ((svcpu->slb_max) == max_slb_size) in kvmppc_mmu_next_segment()
300 r = svcpu->slb_max; in kvmppc_mmu_next_segment()
301 svcpu->slb_max++; in kvmppc_mmu_next_segment()
304 svcpu_put(svcpu); in kvmppc_mmu_next_segment()
310 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_map_segment() local
323 svcpu->slb[slb_index].esid = 0; in kvmppc_mmu_map_segment()
344 svcpu->slb[slb_index].esid = slb_esid; in kvmppc_mmu_map_segment()
345 svcpu->slb[slb_index].vsid = slb_vsid; in kvmppc_mmu_map_segment()
350 svcpu_put(svcpu); in kvmppc_mmu_map_segment()
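
The hits above cover the lookup-then-map path: kvmppc_mmu_next_segment() scans the shadow SLB for a reusable or already-matching slot and otherwise bumps slb_max, and kvmppc_mmu_map_segment() repopulates the chosen slot. A loose, self-contained sketch of that pattern follows; the shadow_vcpu/shadow_slbe types, the SLB_ESID_V and ESID_MASK values, the 32-entry limit and the -1 "full" fallback are illustrative assumptions, not the kernel's definitions (the real code's handling of a full shadow SLB is not shown in the hits above and may differ).

/* Simplified model of the slot-search-then-map pattern shown above.
 * The types, bit values and limits here are stand-ins, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V   0x0000000008000000ULL  /* placeholder "valid" bit */
#define ESID_MASK    0xfffffffff0000000ULL  /* placeholder ESID mask   */
#define MAX_SLB_SIZE 32                     /* assumed shadow-SLB size */

struct shadow_slbe {
	uint64_t esid;
	uint64_t vsid;
};

struct shadow_vcpu {
	struct shadow_slbe slb[MAX_SLB_SIZE];
	int slb_max;
};

/* Loosely models kvmppc_mmu_next_segment(): reuse an invalid or
 * already-matching slot, otherwise grow slb_max; -1 stands in for
 * whatever the real code does when the shadow SLB is full. */
static int next_segment(struct shadow_vcpu *svcpu, uint64_t esid)
{
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			return i;                 /* invalidated slot, reuse it */
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid)
			return i;                 /* segment already mapped */
	}

	if (svcpu->slb_max == MAX_SLB_SIZE)
		return -1;                        /* no room left (assumption) */

	return svcpu->slb_max++;
}

/* Loosely models the tail of kvmppc_mmu_map_segment(): invalidate the
 * slot before repopulating it, then write the new esid/vsid pair.
 * The valid bit is assumed to arrive already set in slb_esid. */
static void map_segment(struct shadow_vcpu *svcpu, int idx,
			uint64_t slb_esid, uint64_t slb_vsid)
{
	svcpu->slb[idx].esid = 0;
	svcpu->slb[idx].esid = slb_esid;
	svcpu->slb[idx].vsid = slb_vsid;
}

int main(void)
{
	struct shadow_vcpu svcpu = { .slb_max = 0 };
	uint64_t esid = 0x1000000000000000ULL & ESID_MASK;
	int idx = next_segment(&svcpu, esid);

	if (idx >= 0)
		map_segment(&svcpu, idx, esid | SLB_ESID_V, 0x42);
	printf("slot %d, slb_max %d\n", idx, svcpu.slb_max);
	return 0;
}
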
356 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segment() local
360 for (i = 0; i < svcpu->slb_max; i++) { in kvmppc_mmu_flush_segment()
361 if ((svcpu->slb[i].esid & SLB_ESID_V) && in kvmppc_mmu_flush_segment()
362 (svcpu->slb[i].esid & seg_mask) == ea) { in kvmppc_mmu_flush_segment()
364 svcpu->slb[i].esid = 0; in kvmppc_mmu_flush_segment()
368 svcpu_put(svcpu); in kvmppc_mmu_flush_segment()
373 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segments() local
374 svcpu->slb_max = 0; in kvmppc_mmu_flush_segments()
375 svcpu->slb[0].esid = 0; in kvmppc_mmu_flush_segments()
376 svcpu_put(svcpu); in kvmppc_mmu_flush_segments()
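
The remaining hits are the invalidation paths: kvmppc_mmu_flush_segment() clears any valid shadow entry whose masked ESID matches the given effective address, and kvmppc_mmu_flush_segments() forgets the whole shadow SLB by resetting slb_max. A rough stand-alone model under the same simplified types as the previous sketch; again, the type and constant definitions are assumptions, not the kernel's.

/* Simplified model of the flush paths shown above, using the same
 * stand-in shadow_vcpu layout as the earlier sketch. */
#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V 0x0000000008000000ULL    /* placeholder "valid" bit */

struct shadow_slbe {
	uint64_t esid;
	uint64_t vsid;
};

struct shadow_vcpu {
	struct shadow_slbe slb[64];
	int slb_max;
};

/* Loosely models kvmppc_mmu_flush_segment(): invalidate every valid
 * entry whose masked ESID equals the target effective address. */
static void flush_segment(struct shadow_vcpu *svcpu, uint64_t ea,
			  uint64_t seg_mask)
{
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea)
			svcpu->slb[i].esid = 0;
	}
}

/* Loosely models kvmppc_mmu_flush_segments(): drop the whole shadow SLB
 * by resetting slb_max and clearing slot 0. */
static void flush_segments(struct shadow_vcpu *svcpu)
{
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
}

int main(void)
{
	struct shadow_vcpu svcpu = { .slb_max = 2 };

	svcpu.slb[0].esid = 0x1000000000000000ULL | SLB_ESID_V;
	svcpu.slb[1].esid = 0x2000000000000000ULL | SLB_ESID_V;

	flush_segment(&svcpu, 0x1000000000000000ULL, 0xfffffffff0000000ULL);
	printf("slot 0 esid after flush: %#llx\n",
	       (unsigned long long)svcpu.slb[0].esid);

	flush_segments(&svcpu);
	printf("slb_max after full flush: %d\n", svcpu.slb_max);
	return 0;
}
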