/external/tpm2-tss/src/tss2-esys/ |
D | esys_tr.c |
     49  r = iesys_MU_IESYS_RESOURCE_Marshal(&esys_object->rsrc, NULL, SIZE_MAX,  in Esys_TR_Serialize()
     57  r = iesys_MU_IESYS_RESOURCE_Marshal(&esys_object->rsrc, *buffer,  in Esys_TR_Serialize()
     96  &esys_object->rsrc);  in Esys_TR_Deserialize()
    140  esysHandleNode->rsrc.handle = tpm_handle;  in Esys_TR_FromTPMPublic_Async()
    200  if (objectHandleNode->rsrc.handle >= TPM2_NV_INDEX_FIRST  in Esys_TR_FromTPMPublic_Finish()
    201  && objectHandleNode->rsrc.handle <= TPM2_NV_INDEX_LAST) {  in Esys_TR_FromTPMPublic_Finish()
    212  objectHandleNode->rsrc.rsrcType = IESYSC_NV_RSRC;  in Esys_TR_FromTPMPublic_Finish()
    213  objectHandleNode->rsrc.name = *nvName;  in Esys_TR_FromTPMPublic_Finish()
    214  objectHandleNode->rsrc.misc.rsrc_nv_pub = *nvPublic;  in Esys_TR_FromTPMPublic_Finish()
    217  } else if(objectHandleNode->rsrc.handle >> TPM2_HR_SHIFT == TPM2_HT_LOADED_SESSION  in Esys_TR_FromTPMPublic_Finish()
    [all …]
|
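The Esys_TR_Serialize()/Esys_TR_Deserialize() hits above are where the library marshals and unmarshals the internal rsrc metadata that backs an ESYS_TR handle. A minimal caller-side sketch of that round trip, assuming the standard ESAPI prototypes from tss2_esys.h (esys_ctx and tr_handle stand in for a context and object obtained elsewhere):

    #include <stddef.h>
    #include <stdint.h>
    #include <tss2/tss2_esys.h>

    /* Sketch only: persist an ESYS_TR (including its internal rsrc record)
     * to a byte buffer and restore it into a fresh handle later. */
    static TSS2_RC tr_roundtrip(ESYS_CONTEXT *esys_ctx, ESYS_TR tr_handle)
    {
        uint8_t *buf = NULL;
        size_t buf_size = 0;

        TSS2_RC rc = Esys_TR_Serialize(esys_ctx, tr_handle, &buf, &buf_size);
        if (rc != TSS2_RC_SUCCESS)
            return rc;

        ESYS_TR restored = ESYS_TR_NONE;
        rc = Esys_TR_Deserialize(esys_ctx, buf, buf_size, &restored);
        Esys_Free(buf);   /* the buffer is allocated by the library */
        return rc;
    }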
D | esys_iutil.c |
    129  if (esys_context->session_tab[i]->rsrc.rsrcType != IESYSC_SESSION_RSRC) {  in init_session_tab()
    174  if (session->rsrc.misc.rsrc_session.  in iesys_compute_encrypt_nonce()
    182  *encryptNonce = &session->rsrc.misc.rsrc_session.nonceTPM;  in iesys_compute_encrypt_nonce()
    235  session->rsrc.misc.rsrc_session.authHash) {  in iesys_compute_cp_hashtab()
    242  r = iesys_crypto_cpHash(session->rsrc.misc.rsrc_session.  in iesys_compute_cp_hashtab()
    250  session->rsrc.misc.rsrc_session.authHash;  in iesys_compute_cp_hashtab()
    298  if (rp_hash_tab[j].alg == session->rsrc.misc.rsrc_session.authHash) {  in iesys_compute_rp_hashtab()
    305  r = iesys_crypto_rpHash(session->rsrc.misc.rsrc_session.authHash,  in iesys_compute_rp_hashtab()
    311  session->rsrc.misc.rsrc_session.authHash;  in iesys_compute_rp_hashtab()
    466  TPM2B_PUBLIC pub = tpmKeyNode->rsrc.misc.rsrc_key_pub;  in iesys_compute_encrypted_salt()
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/AMDGPU/ |
D | amdgcn-demanded-vector-elts.ll |
      8  ; CHECK-NEXT: %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, …
     10  define amdgpu_ps float @buffer_load_f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
     11  …%data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 false, i1 …
     16  ; CHECK-NEXT: %data = call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i3…
     18  define amdgpu_ps <1 x float> @buffer_load_v1f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
     19  …%data = call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 fa…
     24  ; CHECK-NEXT: %data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i3…
     26  define amdgpu_ps <2 x float> @buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) #0 {
     27  …%data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 fa…
     32  ; CHECK-NEXT: %data = call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %rsrc, i32 %idx, i3…
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | llvm.amdgcn.raw.buffer.store.ll |
     55  define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 %offset) {
     57  …call void @llvm.amdgcn.raw.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %offset, i32 0, i32 …
     64  define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data, i32 %offset) #0 {
     66  …call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %offset, i32…
     74  define amdgpu_ps void @buffer_store_x1_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a, float %v1, f…
     81  call void @llvm.amdgcn.raw.buffer.store.f32(float %v1, <4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
     82  call void @llvm.amdgcn.raw.buffer.store.f32(float %v2, <4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
     83  call void @llvm.amdgcn.raw.buffer.store.f32(float %v3, <4 x i32> %rsrc, i32 %a3, i32 0, i32 0)
     84  call void @llvm.amdgcn.raw.buffer.store.f32(float %v4, <4 x i32> %rsrc, i32 %a4, i32 0, i32 0)
     85  call void @llvm.amdgcn.raw.buffer.store.f32(float %v5, <4 x i32> %rsrc, i32 %a5, i32 0, i32 0)
    [all …]
|
D | llvm.amdgcn.buffer.store.ll |
     83  define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 %index) {
     85  …call void @llvm.amdgcn.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %index, i32 0, i1 0, i1 …
     92  define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data, i32 %index) #0 {
     94  …call void @llvm.amdgcn.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %index, i32 0, i…
    102  define amdgpu_ps void @buffer_store_x1_offen_merged(<4 x i32> inreg %rsrc, i32 %a, float %v1, float…
    109  call void @llvm.amdgcn.buffer.store.f32(float %v1, <4 x i32> %rsrc, i32 0, i32 %a1, i1 0, i1 0)
    110  call void @llvm.amdgcn.buffer.store.f32(float %v2, <4 x i32> %rsrc, i32 0, i32 %a2, i1 0, i1 0)
    111  call void @llvm.amdgcn.buffer.store.f32(float %v3, <4 x i32> %rsrc, i32 0, i32 %a3, i1 0, i1 0)
    112  call void @llvm.amdgcn.buffer.store.f32(float %v4, <4 x i32> %rsrc, i32 0, i32 %a4, i1 0, i1 0)
    113  call void @llvm.amdgcn.buffer.store.f32(float %v5, <4 x i32> %rsrc, i32 0, i32 %a5, i1 0, i1 0)
    [all …]
|
D | llvm.amdgcn.buffer.load.ll |
     90  define amdgpu_ps float @buffer_load_x1(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) {
     92  %data = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 0, i1 0)
     99  define amdgpu_ps <2 x float> @buffer_load_x2(<4 x i32> inreg %rsrc, i32 %idx, i32 %ofs) {
    101  …%data = call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %rsrc, i32 %idx, i32 %ofs, i1 0,…
    120  define amdgpu_ps float @buffer_load_mmo(<4 x i32> inreg %rsrc, float addrspace(3)* %lds) {
    123  %val = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 0, i1 0, i1 0)
    134  define amdgpu_ps void @buffer_load_x1_offen_merged(<4 x i32> inreg %rsrc, i32 %a) {
    142  %r1 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %a1, i1 0, i1 0)
    143  %r2 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %a2, i1 0, i1 0)
    144  %r3 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %a3, i1 0, i1 0)
    [all …]
|
D | llvm.amdgcn.raw.buffer.load.ll |
     81  define amdgpu_ps float @buffer_load_x1(<4 x i32> inreg %rsrc, i32 %ofs) {
     83  %data = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %ofs, i32 0, i32 0)
     90  define amdgpu_ps <2 x float> @buffer_load_x2(<4 x i32> inreg %rsrc, i32 %ofs) {
     92  …%data = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %ofs, i32 0, i32 …
    112  define amdgpu_ps float @buffer_load_mmo(<4 x i32> inreg %rsrc, float addrspace(3)* %lds) {
    115  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
    127  define amdgpu_ps void @buffer_load_x1_offen_merged_and(<4 x i32> inreg %rsrc, i32 %a) {
    135  %r1 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a1, i32 0, i32 0)
    136  %r2 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a2, i32 0, i32 0)
    137  %r3 = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %a3, i32 0, i32 0)
    [all …]
|
D | llvm.amdgcn.buffer.atomic.ll |
     22  define amdgpu_ps float @test1(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex, i32 %voffset) {
     24  %o1 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     25  …%o2 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o1, <4 x i32> %rsrc, i32 %vindex, i32 0, i…
     26  …%o3 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o2, <4 x i32> %rsrc, i32 0, i32 %voffset, …
     27  …%o4 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o3, <4 x i32> %rsrc, i32 %vindex, i32 %vof…
     29  …%o5 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o4, <4 x i32> %rsrc, i32 0, i32 %ofs.5, i1…
     30  …%o6 = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o5, <4 x i32> %rsrc, i32 0, i32 8192, i1 0)
     31  …%unused = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %o6, <4 x i32> %rsrc, i32 0, i32 0, i1 …
     53  define amdgpu_ps float @test11(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex, i32 %voffset) {
     56  %o1 = call i64 @llvm.amdgcn.buffer.atomic.swap.i64(i64 %o0, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
    [all …]
|
D | image_ls_mipmap_zero.ll |
      8  define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s) {
     10  …oat> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
     17  define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
     19  …vm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
     26  define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
     28  …n.image.load.mip.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
     35  define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
     37  …dgcn.image.load.mip.1darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
     44  define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
     46  …ge.load.mip.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
    [all …]
|
D | llvm.amdgcn.image.msaa.load.ll |
      5  define amdgpu_ps <4 x float> @load_1d(<8 x i32> inreg %rsrc, i32 %s) {
      7  …%v = call <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i…
     13  define amdgpu_ps <4 x float> @load_1d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 …
     15  …t>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
     24  define amdgpu_ps <4 x float> @load_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 …
     26  …>, i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 2, i32 0)
     35  define amdgpu_ps <4 x float> @load_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
     37  …t> @llvm.amdgcn.image.msaa.load.2d.v4f32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
     43  define amdgpu_ps <4 x float> @load_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 …
     45  …@llvm.amdgcn.image.msaa.load.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
    [all …]
|
D | llvm.amdgcn.image.atomic.dim.ll |
      8  define amdgpu_ps float @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
     10  …%v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0,…
     18  define amdgpu_ps <2 x float> @atomic_swap_1d_i64(<8 x i32> inreg %rsrc, i64 %data, i32 %s) {
     20  …%v = call i64 @llvm.amdgcn.image.atomic.swap.1d.i64.i32(i64 %data, i32 %s, <8 x i32> %rsrc, i32 0,…
     28  define amdgpu_ps float @atomic_add_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
     30  …%v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, …
     38  define amdgpu_ps float @atomic_sub_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
     40  …%v = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0, …
     48  define amdgpu_ps float @atomic_smin_1d(<8 x i32> inreg %rsrc, i32 %data, i32 %s) {
     50  …%v = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i32(i32 %data, i32 %s, <8 x i32> %rsrc, i32 0,…
    [all …]
|
D | llvm.amdgcn.struct.buffer.atomic.ll |
     21  define amdgpu_ps float @test1(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex, i32 %voffset) {
     23  …%o1 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0…
     24  …%o2 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o1, <4 x i32> %rsrc, i32 %vindex, i…
     25  …%o3 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o2, <4 x i32> %rsrc, i32 0, i32 %vo…
     26  …%o4 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o3, <4 x i32> %rsrc, i32 %vindex, i…
     28  …%o5 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o4, <4 x i32> %rsrc, i32 0, i32 %of…
     29  …%o6 = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o5, <4 x i32> %rsrc, i32 0, i32 4, …
     30  …%unused = call i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32 %o6, <4 x i32> %rsrc, i32 0, i32…
     32  …%out = call float @llvm.amdgcn.struct.buffer.atomic.swap.f32(float %o7, <4 x i32> %rsrc, i32 0, i3…
     59  define amdgpu_ps float @test2(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex) {
    [all …]
|
D | llvm.amdgcn.image.load.a16.ll |
      7  define amdgpu_ps <4 x float> @load.f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
     10  …%v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 1, i16 %x, <8 x i32> %rsrc, i32 0, …
     17  define amdgpu_ps <4 x float> @load.v2f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
     20  …%v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 3, i16 %x, <8 x i32> %rsrc, i32 0, …
     27  define amdgpu_ps <4 x float> @load.v3f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
     30  …%v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 7, i16 %x, <8 x i32> %rsrc, i32 0, …
     37  define amdgpu_ps <4 x float> @load.v4f32.1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
     40  …%v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 15, i16 %x, <8 x i32> %rsrc, i32 0,…
     47  define amdgpu_ps <4 x float> @load.f32.2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
     51  …%v = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i16(i32 1, i16 %x, i16 %y, <8 x i32> %rsrc,…
    [all …]
|
/external/tpm2-tss/src/tss2-esys/api/ |
D | Esys_StartAuthSession.c |
    243  : tpmKeyNode->rsrc.handle,  in Esys_StartAuthSession_Async()
    245  : bindNode->rsrc.handle, nonceCaller,  in Esys_StartAuthSession_Async()
    261  none.rsrc.handle = TPM2_RH_NULL;  in Esys_StartAuthSession_Async()
    262  none.rsrc.rsrcType = IESYSC_WITHOUT_MISC_RSRC;  in Esys_StartAuthSession_Async()
    264  none.rsrc.name.name,  in Esys_StartAuthSession_Async()
    265  sizeof(none.rsrc.name.name),  in Esys_StartAuthSession_Async()
    268  none.rsrc.name.size = offset;  in Esys_StartAuthSession_Async()
    351  IESYS_RESOURCE *rsrc = &sessionHandleNode->rsrc;  in Esys_StartAuthSession_Finish() local
    352  rsrc->handle = ESYS_TR_NONE;  in Esys_StartAuthSession_Finish()
    353  rsrc->misc.rsrc_session.sessionAttributes = TPMA_SESSION_CONTINUESESSION;  in Esys_StartAuthSession_Finish()
    [all …]
|
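The rsrc fields being filled in above describe the session object that Esys_StartAuthSession() hands back to the caller. A hedged caller-side sketch of that call, assuming the usual ESAPI prototype (an unbound, unsalted HMAC session; esys_ctx is a placeholder context):

    #include <tss2/tss2_esys.h>

    /* Sketch only: start a plain HMAC session; the finished call populates
     * the session's internal rsrc.misc.rsrc_session data seen in the hits above. */
    static TSS2_RC start_hmac_session(ESYS_CONTEXT *esys_ctx, ESYS_TR *session)
    {
        TPMT_SYM_DEF sym = { .algorithm = TPM2_ALG_NULL };

        return Esys_StartAuthSession(esys_ctx,
                                     ESYS_TR_NONE,   /* tpmKey: unsalted */
                                     ESYS_TR_NONE,   /* bind: unbound */
                                     ESYS_TR_NONE, ESYS_TR_NONE, ESYS_TR_NONE,
                                     NULL,           /* nonceCaller: let ESAPI choose */
                                     TPM2_SE_HMAC,
                                     &sym,
                                     TPM2_ALG_SHA256,
                                     session);
    }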
/external/mesa3d/src/gallium/drivers/panfrost/ |
D | pan_resource.c |
    118  struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;  in panfrost_resource_get_handle() local
    119  struct renderonly_scanout *scanout = rsrc->scanout;  in panfrost_resource_get_handle()
    121  handle->modifier = rsrc->modifier;  in panfrost_resource_get_handle()
    122  rsrc->modifier_constant = true;  in panfrost_resource_get_handle()
    130  handle->handle = rsrc->bo->gem_handle;  in panfrost_resource_get_handle()
    131  handle->stride = rsrc->slices[0].stride;  in panfrost_resource_get_handle()
    132  handle->offset = rsrc->slices[0].offset;  in panfrost_resource_get_handle()
    150  int fd = panfrost_bo_export(rsrc->bo);  in panfrost_resource_get_handle()
    156  handle->stride = rsrc->slices[0].stride;  in panfrost_resource_get_handle()
    157  handle->offset = rsrc->slices[0].offset;  in panfrost_resource_get_handle()
    [all …]
|
D | pan_sfbd.c |
     82  struct panfrost_resource *rsrc = pan_resource(surf->texture);  in panfrost_sfbd_set_cbuf() local
     87  signed stride = rsrc->slices[level].stride;  in panfrost_sfbd_set_cbuf()
     89  mali_ptr base = panfrost_get_texture_address(rsrc, level, first_layer, 0);  in panfrost_sfbd_set_cbuf()
     97  if (rsrc->modifier == DRM_FORMAT_MOD_LINEAR)  in panfrost_sfbd_set_cbuf()
     99  else if (rsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {  in panfrost_sfbd_set_cbuf()
    113  struct panfrost_resource *rsrc = pan_resource(surf->texture);  in panfrost_sfbd_set_zsbuf() local
    118  if (rsrc->modifier != DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED)  in panfrost_sfbd_set_zsbuf()
    122  fb->zs_writeback.base = rsrc->bo->ptr.gpu + rsrc->slices[level].offset;  in panfrost_sfbd_set_zsbuf()
    123  fb->zs_writeback.row_stride = rsrc->slices[level].stride * 16;  in panfrost_sfbd_set_zsbuf()
    221  struct panfrost_resource *rsrc = pan_resource(surf->texture);  in panfrost_sfbd_fragment() local
    [all …]
|
D | pan_job.c |
    555  struct panfrost_resource *rsrc,  in panfrost_batch_add_resource_bos() argument
    558  panfrost_batch_add_bo(batch, rsrc->bo, flags);  in panfrost_batch_add_resource_bos()
    561  if (rsrc->slices[i].checksum_bo)  in panfrost_batch_add_resource_bos()
    562  panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);  in panfrost_batch_add_resource_bos()
    564  if (rsrc->separate_stencil)  in panfrost_batch_add_resource_bos()
    565  panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);  in panfrost_batch_add_resource_bos()
    576  struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);  in panfrost_batch_add_fbo_bos() local
    577  panfrost_batch_add_resource_bos(batch, rsrc, flags);  in panfrost_batch_add_fbo_bos()
    581  struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);  in panfrost_batch_add_fbo_bos() local
    582  panfrost_batch_add_resource_bos(batch, rsrc, flags);  in panfrost_batch_add_fbo_bos()
    [all …]
|
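The pan_job.c hits trace out the BO-tracking pattern: when a resource is attached to a batch, its main BO, any per-slice checksum BOs, and a separate stencil resource's BO are all recorded. A simplified, self-contained sketch of that shape (the struct layouts below are trimmed stand-ins, not the real panfrost types):

    #include <stddef.h>

    /* Trimmed stand-ins; only the fields touched by the hits above are modeled. */
    struct bo { int gem_handle; };
    struct slice { struct bo *checksum_bo; };
    struct resource {
        struct bo *bo;
        struct slice slices[16];          /* hypothetical fixed slice count */
        size_t num_slices;
        struct resource *separate_stencil;
    };
    struct batch;

    /* Stand-in for panfrost_batch_add_bo(): record one BO against the batch. */
    static void batch_add_bo(struct batch *b, struct bo *bo, unsigned flags)
    {
        (void)b; (void)bo; (void)flags;
    }

    /* Mirrors the shape of panfrost_batch_add_resource_bos() seen above. */
    static void batch_add_resource_bos(struct batch *b, struct resource *rsrc,
                                       unsigned flags)
    {
        batch_add_bo(b, rsrc->bo, flags);

        for (size_t i = 0; i < rsrc->num_slices; i++)
            if (rsrc->slices[i].checksum_bo)
                batch_add_bo(b, rsrc->slices[i].checksum_bo, flags);

        if (rsrc->separate_stencil)
            batch_add_bo(b, rsrc->separate_stencil->bo, flags);
    }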
D | pan_mfbd.c |
     38  struct panfrost_resource *rsrc = pan_resource(surf->texture);  in panfrost_mfbd_has_zs_crc_ext() local
     40  if (rsrc->checksummed)  in panfrost_mfbd_has_zs_crc_ext()
    133  struct panfrost_resource *rsrc = pan_resource(surf->texture);  in panfrost_mfbd_rt_set_buf() local
    137  int stride = rsrc->slices[level].stride;  in panfrost_mfbd_rt_set_buf()
    142  unsigned layer_stride = (nr_samples > 1) ? rsrc->slices[level].size0 : 0;  in panfrost_mfbd_rt_set_buf()
    143  mali_ptr base = panfrost_get_texture_address(rsrc, level, first_layer, 0);  in panfrost_mfbd_rt_set_buf()
    154  if (rsrc->modifier == DRM_FORMAT_MOD_LINEAR) {  in panfrost_mfbd_rt_set_buf()
    163  } else if (rsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {  in panfrost_mfbd_rt_set_buf()
    172  } else if (drm_is_afbc(rsrc->modifier)) {  in panfrost_mfbd_rt_set_buf()
    178  unsigned header_size = rsrc->slices[level].header_size;  in panfrost_mfbd_rt_set_buf()
    [all …]
|
/external/llvm-project/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/ |
D | llvm.amdgcn.buffer.atomic.ll |
      4  define float @buffer_atomic_swap(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
      6  …%orig = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
     12  define float @buffer_atomic_add(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     14  …%orig = call i32 @llvm.amdgcn.buffer.atomic.add.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     20  define float @buffer_atomic_sub(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     22  …%orig = call i32 @llvm.amdgcn.buffer.atomic.sub.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     28  define float @buffer_atomic_smin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     30  …%orig = call i32 @llvm.amdgcn.buffer.atomic.smin.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
     36  define float @buffer_atomic_umin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     38  …%orig = call i32 @llvm.amdgcn.buffer.atomic.umin.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
    [all …]
|
D | llvm.amdgcn.image.atomic.ll |
      4  define float @image_atomic_swap(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
      6  …%orig = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
     12  define float @image_atomic_add(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     14  …%orig = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i…
     20  define float @image_atomic_sub(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     22  …%orig = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i…
     28  define float @image_atomic_smin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     30  …%orig = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
     36  define float @image_atomic_umin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     38  …%orig = call i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
    [all …]
|
/external/llvm-project/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/ |
D | llvm.amdgcn.buffer.atomic.ll |
      4  define float @buffer_atomic_swap(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
      6  …%orig = call i32 @llvm.amdgcn.buffer.atomic.swap.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
     12  define float @buffer_atomic_add(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     14  …%orig = call i32 @llvm.amdgcn.buffer.atomic.add.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     20  define float @buffer_atomic_sub(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     22  …%orig = call i32 @llvm.amdgcn.buffer.atomic.sub.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     28  define float @buffer_atomic_smin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     30  …%orig = call i32 @llvm.amdgcn.buffer.atomic.smin.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
     36  define float @buffer_atomic_umin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     38  …%orig = call i32 @llvm.amdgcn.buffer.atomic.umin.i32(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 …
    [all …]
|
D | llvm.amdgcn.image.atomic.ll |
      4  define float @image_atomic_swap(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
      6  …%orig = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
     12  define float @image_atomic_add(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     14  …%orig = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i…
     20  define float @image_atomic_sub(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     22  …%orig = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i…
     28  define float @image_atomic_smin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     30  …%orig = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
     36  define float @image_atomic_umin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     38  …%orig = call i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, …
    [all …]
|
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/ |
D | llvm.amdgcn.buffer.atomic.ll |
      4  define float @buffer_atomic_swap(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
      6  %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     12  define float @buffer_atomic_add(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     14  %orig = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     20  define float @buffer_atomic_sub(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     22  %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     28  define float @buffer_atomic_smin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     30  %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     36  define float @buffer_atomic_umin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
     38  %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
    [all …]
|
D | llvm.amdgcn.image.atomic.ll |
      4  define float @image_atomic_swap(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
      6  …%orig = call i32 @llvm.amdgcn.image.atomic.swap.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i…
     12  define float @image_atomic_add(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     14  …%orig = call i32 @llvm.amdgcn.image.atomic.add.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i1…
     20  define float @image_atomic_sub(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     22  …%orig = call i32 @llvm.amdgcn.image.atomic.sub.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i1…
     28  define float @image_atomic_smin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     30  …%orig = call i32 @llvm.amdgcn.image.atomic.smin.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i…
     36  define float @image_atomic_umin(<8 x i32> inreg %rsrc, i32 inreg %addr, i32 inreg %data) #0 {
     38  …%orig = call i32 @llvm.amdgcn.image.atomic.umin.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i…
    [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.amdgcn.buffer.atomic.ll |
     20  define amdgpu_ps float @test1(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex, i32 %voffset) {
     22  %o1 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     23  %o2 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o1, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
     24  …%o3 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o2, <4 x i32> %rsrc, i32 0, i32 %voffset, i1 0)
     25  …%o4 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o3, <4 x i32> %rsrc, i32 %vindex, i32 %voffset…
     27  %o5 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o4, <4 x i32> %rsrc, i32 0, i32 %ofs.5, i1 0)
     28  %o6 = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o5, <4 x i32> %rsrc, i32 0, i32 8192, i1 0)
     29  %unused = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %o6, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
     52  define amdgpu_ps float @test2(<4 x i32> inreg %rsrc, i32 %data, i32 %vindex) {
     54  …%t1 = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 %vindex, i32 0, i1 0)
    [all …]
|