• Home
  • Raw
  • Download

Lines Matching refs:DeviceData

142   std::vector<DeviceDataTy> &DeviceData;  member in __anon3ed6d2340111::StreamManagerTy
152 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in resizeStreamPool()
171 std::vector<DeviceDataTy> &DeviceData) in StreamManagerTy() argument
173 DeviceData(DeviceData) { in StreamManagerTy()
192 checkResult(cuCtxSetCurrent(DeviceData[I].Context), in ~StreamManagerTy()
279 std::vector<DeviceDataTy> DeviceData; member in __anon3ed6d2340111::DeviceRTLTy
284 FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back(); in addOffloadEntry()
292 DeviceData[DeviceId].FuncGblEntries.back().Entries) in getOffloadEntry()
301 FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back(); in getOffloadEntriesTable()
315 DeviceData[DeviceId].FuncGblEntries.emplace_back(); in clearOffloadEntriesTable()
316 FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back(); in clearOffloadEntriesTable()
355 DeviceData.resize(NumberOfDevices); in DeviceRTLTy()
370 std::make_unique<StreamManagerTy>(NumberOfDevices, DeviceData); in DeviceRTLTy()
382 for (DeviceDataTy &D : DeviceData) { in ~DeviceRTLTy()
437 Err = cuDevicePrimaryCtxRetain(&DeviceData[DeviceId].Context, Device); in initDevice()
441 Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in initDevice()
456 DeviceData[DeviceId].BlocksPerGrid = DeviceRTLTy::DefaultNumTeams; in initDevice()
459 DeviceData[DeviceId].BlocksPerGrid = MaxGridDimX; in initDevice()
464 DeviceData[DeviceId].BlocksPerGrid = DeviceRTLTy::HardTeamLimit; in initDevice()
474 DeviceData[DeviceId].ThreadsPerBlock = DeviceRTLTy::DefaultNumThreads; in initDevice()
477 DeviceData[DeviceId].ThreadsPerBlock = MaxBlockDimX; in initDevice()
482 DeviceData[DeviceId].ThreadsPerBlock = DeviceRTLTy::HardThreadLimit; in initDevice()
491 DeviceData[DeviceId].WarpSize = 32; in initDevice()
494 DeviceData[DeviceId].WarpSize = WarpSize; in initDevice()
498 if (EnvTeamLimit > 0 && DeviceData[DeviceId].BlocksPerGrid > EnvTeamLimit) { in initDevice()
501 DeviceData[DeviceId].BlocksPerGrid = EnvTeamLimit; in initDevice()
507 DeviceData[DeviceId].BlocksPerGrid, in initDevice()
508 DeviceData[DeviceId].ThreadsPerBlock, DeviceData[DeviceId].WarpSize); in initDevice()
514 DeviceData[DeviceId].NumTeams = EnvNumTeams; in initDevice()
516 DeviceData[DeviceId].NumTeams = DeviceRTLTy::DefaultNumTeams; in initDevice()
521 if (DeviceData[DeviceId].NumTeams > DeviceData[DeviceId].BlocksPerGrid) { in initDevice()
523 DeviceData[DeviceId].BlocksPerGrid); in initDevice()
524 DeviceData[DeviceId].NumTeams = DeviceData[DeviceId].BlocksPerGrid; in initDevice()
528 DeviceData[DeviceId].NumThreads = DeviceRTLTy::DefaultNumThreads; in initDevice()
531 if (DeviceData[DeviceId].NumThreads > in initDevice()
532 DeviceData[DeviceId].ThreadsPerBlock) { in initDevice()
534 DeviceData[DeviceId].ThreadsPerBlock); in initDevice()
535 DeviceData[DeviceId].NumTeams = DeviceData[DeviceId].ThreadsPerBlock; in initDevice()
    [NOTE(review): likely copy-paste bug in the quoted source — this assignment sits inside the NumThreads clamp (lines 531-534, "NumThreads > ThreadsPerBlock"), so it should assign DeviceData[DeviceId].NumThreads, not NumTeams; compare the parallel NumTeams clamp at line 524. Verify against upstream rtl.cpp before relying on this excerpt.]
544 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in loadBinary()
566 std::list<KernelTy> &KernelsList = DeviceData[DeviceId].KernelsList; in loadBinary()
726 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in dataAlloc()
742 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in dataSubmit()
764 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in dataRetrieve()
786 CUresult Err = cuCtxSetCurrent(DeviceData[SrcDevId].Context); in dataExchange()
809 Err = cuCtxEnablePeerAccess(DeviceData[DstDevId].Context, 0); in dataExchange()
818 Err = cuMemcpyPeerAsync((CUdeviceptr)DstPtr, DeviceData[DstDevId].Context, in dataExchange()
819 (CUdeviceptr)SrcPtr, DeviceData[SrcDevId].Context, in dataExchange()
834 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in dataDelete()
850 CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context); in runTargetTeamRegion()
871 DP("Adding master warp: +%d threads\n", DeviceData[DeviceId].WarpSize); in runTargetTeamRegion()
872 CudaThreadsPerBlock += DeviceData[DeviceId].WarpSize; in runTargetTeamRegion()
876 DeviceData[DeviceId].NumThreads); in runTargetTeamRegion()
877 CudaThreadsPerBlock = DeviceData[DeviceId].NumThreads; in runTargetTeamRegion()
880 if (CudaThreadsPerBlock > DeviceData[DeviceId].ThreadsPerBlock) { in runTargetTeamRegion()
882 DeviceData[DeviceId].ThreadsPerBlock); in runTargetTeamRegion()
883 CudaThreadsPerBlock = DeviceData[DeviceId].ThreadsPerBlock; in runTargetTeamRegion()
928 DP("Using default number of teams %d\n", DeviceData[DeviceId].NumTeams); in runTargetTeamRegion()
929 CudaBlocksPerGrid = DeviceData[DeviceId].NumTeams; in runTargetTeamRegion()
931 } else if (TeamNum > DeviceData[DeviceId].BlocksPerGrid) { in runTargetTeamRegion()
933 DeviceData[DeviceId].BlocksPerGrid); in runTargetTeamRegion()
934 CudaBlocksPerGrid = DeviceData[DeviceId].BlocksPerGrid; in runTargetTeamRegion()