/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/stream_executor/platform.h"

#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/xla/stream_executor/lib/error.h"
#include "tensorflow/compiler/xla/stream_executor/platform/logging.h"
#include "tensorflow/compiler/xla/stream_executor/platform/port.h"
#include "tensorflow/compiler/xla/stream_executor/stream_executor_pimpl.h"

namespace stream_executor {

std::string PlatformKindString(PlatformKind kind) {
  switch (kind) {
    case PlatformKind::kCuda:
      return "CUDA";
    case PlatformKind::kROCm:
      return "ROCm";
    case PlatformKind::kOpenCL:
      return "OpenCL";
    case PlatformKind::kHost:
      return "Host";
    case PlatformKind::kMock:
      return "Mock";
    default:
      return absl::StrCat("InvalidPlatformKind(", static_cast<int>(kind), ")");
  }
}

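// Inverse of PlatformKindString(): linearly scans every enumerated kind and
// returns the one whose canonical name matches; unrecognized names map to
// PlatformKind::kInvalid.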
PlatformKind PlatformKindFromString(std::string kind) {
  for (int i = 0; i < static_cast<int>(PlatformKind::kSize); ++i) {
    if (kind == PlatformKindString(static_cast<PlatformKind>(i))) {
      return static_cast<PlatformKind>(i);
    }
  }

  return PlatformKind::kInvalid;
}

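// Returns true for kinds that can execute work: CUDA, ROCm, OpenCL, or Host.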
bool PlatformIsRunnable(PlatformKind kind) {
  switch (kind) {
    case PlatformKind::kCuda:
    case PlatformKind::kROCm:
    case PlatformKind::kOpenCL:
    case PlatformKind::kHost:
      return true;
    default:
      return false;
  }
}

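// Like PlatformIsRunnable(), but excludes the host platform: only CUDA, ROCm,
// and OpenCL qualify.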
bool PlatformIsRunnableOnDevice(PlatformKind kind) {
  switch (kind) {
    case PlatformKind::kCuda:
    case PlatformKind::kROCm:
    case PlatformKind::kOpenCL:
      return true;
    default:
      return false;
  }
}

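// CHECK-fails if kind lies outside the integer range [kCuda, kMock].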
void CheckPlatformKindIsValid(PlatformKind kind) {
  CHECK(static_cast<int>(PlatformKind::kCuda) <= static_cast<int>(kind) &&
        static_cast<int>(kind) <= static_cast<int>(PlatformKind::kMock))
      << "invalid GPU executor kind: " << PlatformKindString(kind);
}

StreamExecutorConfig::StreamExecutorConfig()
    : ordinal(-1), device_options(DeviceOptions::Default()) {}

StreamExecutorConfig::StreamExecutorConfig(int ordinal_in)
    : ordinal(ordinal_in), device_options(DeviceOptions::Default()) {}

Platform::~Platform() {}

bool Platform::Initialized() const { return true; }

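// No custom initialization options are supported here: supplying any yields
// UNIMPLEMENTED, otherwise this is a no-op that returns OkStatus.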
port::Status Platform::Initialize(
    const std::map<std::string, std::string> &platform_options) {
  if (!platform_options.empty()) {
    return port::Status(port::error::UNIMPLEMENTED,
                        "this platform does not support custom initialization");
  }
  return ::tensorflow::OkStatus();
}

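// Executor shutdown is unsupported here and always reports UNIMPLEMENTED.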
port::Status Platform::ForceExecutorShutdown() {
  return port::Status(port::error::UNIMPLEMENTED,
                      "executor shutdown is not supported on this platform");
}

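// Builds an NxN map over all visible device ordinals recording, for each
// ordered pair (from, to), whether from->CanEnablePeerAccessTo(to) holds.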
std::unique_ptr<Platform::PeerAccessMap> Platform::GetPeerAccessMap() {
  auto *map = new PeerAccessMap;

  int device_count = VisibleDeviceCount();
  for (int i = 0; i < device_count; ++i) {
    for (int j = 0; j < device_count; ++j) {
      StreamExecutor *from = ExecutorForDevice(i).ValueOrDie();
      StreamExecutor *to = ExecutorForDevice(j).ValueOrDie();
      (*map)[{i, j}] = from->CanEnablePeerAccessTo(to);
    }
  }

  return std::unique_ptr<Platform::PeerAccessMap>{map};
}

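// Walks the peer access map and calls EnablePeerAccessTo() for every pair
// that supports it, returning the first error encountered; pairs that cannot
// peer are only logged.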
port::Status Platform::EnablePeerAccess() {
  auto peer_access_map = GetPeerAccessMap();
  for (const auto &access : *peer_access_map) {
    auto devices = access.first;
    if (access.second) {
      StreamExecutor *from = ExecutorForDevice(devices.first).ValueOrDie();
      StreamExecutor *to = ExecutorForDevice(devices.second).ValueOrDie();
      auto status = from->EnablePeerAccessTo(to);
      if (!status.ok()) {
        return status;
      }
    } else {
      LOG(INFO) << "cannot enable peer access from device ordinal "
                << devices.first << " to device ordinal " << devices.second;
    }
  }
  return ::tensorflow::OkStatus();
}

}  // namespace stream_executor