• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/core/kernels/logging_ops.h"
17 
18 #include <iostream>
19 
20 #include "absl/strings/str_cat.h"
21 #include "tensorflow/core/framework/logging.h"
22 #include "tensorflow/core/framework/op_kernel.h"
23 #include "tensorflow/core/lib/core/status.h"
24 #include "tensorflow/core/lib/strings/str_util.h"
25 
26 namespace tensorflow {
27 
28 namespace {
29 
// If the following string is found at the beginning of an output stream
// attribute, the remainder of that attribute is interpreted as a file path
// to append printed messages to.
const char kOutputStreamEscapeStr[] = "file://";

// A mutex that guards appending strings to files (see AppendStringToFile).
// Intentionally heap-allocated and never deleted so it remains valid for
// logging that may happen during program teardown.
static mutex* file_mutex = new mutex();
36 
37 // Appends the given data to the specified file. It will create the file if it
38 // doesn't already exist.
AppendStringToFile(const std::string & fname,StringPiece data,Env * env)39 Status AppendStringToFile(const std::string& fname, StringPiece data,
40                           Env* env) {
41   // TODO(ckluk): If opening and closing on every log causes performance issues,
42   // we can reimplement using reference counters.
43   mutex_lock l(*file_mutex);
44   std::unique_ptr<WritableFile> file;
45   TF_RETURN_IF_ERROR(env->NewAppendableFile(fname, &file));
46   Status a = file->Append(data);
47   Status c = file->Close();
48   return a.ok() ? c : a;
49 }
50 
51 }  // namespace
52 
// Reads the "summarize" attribute, which bounds how many elements of each
// data input are included in the failure message.
AssertOp::AssertOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr("summarize", &summarize_));
}
56 
Compute(OpKernelContext * ctx)57 void AssertOp::Compute(OpKernelContext* ctx) {
58   const Tensor& cond = ctx->input(0);
59   OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(cond.shape()),
60               errors::InvalidArgument("In[0] should be a scalar: ",
61                                       cond.shape().DebugString()));
62 
63   if (cond.scalar<bool>()()) {
64     return;
65   }
66   string msg = "assertion failed: ";
67   for (int i = 1; i < ctx->num_inputs(); ++i) {
68     strings::StrAppend(&msg, "[", ctx->input(i).SummarizeValue(summarize_),
69                        "]");
70     if (i < ctx->num_inputs() - 1) strings::StrAppend(&msg, " ");
71   }
72   ctx->SetStatus(errors::InvalidArgument(msg));
73 }
74 
// Assert is registered for the default device but pins both inputs to host
// memory, since the condition and data values are read on the CPU.
REGISTER_KERNEL_BUILDER(Name("Assert")
                            .Device(DEVICE_DEFAULT)
                            .HostMemory("condition")
                            .HostMemory("data"),
                        AssertOp);
80 
81 class PrintOp : public OpKernel {
82  public:
PrintOp(OpKernelConstruction * ctx)83   explicit PrintOp(OpKernelConstruction* ctx)
84       : OpKernel(ctx), call_counter_(0) {
85     OP_REQUIRES_OK(ctx, ctx->GetAttr("message", &message_));
86     OP_REQUIRES_OK(ctx, ctx->GetAttr("first_n", &first_n_));
87     OP_REQUIRES_OK(ctx, ctx->GetAttr("summarize", &summarize_));
88   }
89 
Compute(OpKernelContext * ctx)90   void Compute(OpKernelContext* ctx) override {
91     if (IsRefType(ctx->input_dtype(0))) {
92       ctx->forward_ref_input_to_ref_output(0, 0);
93     } else {
94       ctx->set_output(0, ctx->input(0));
95     }
96     if (first_n_ >= 0) {
97       mutex_lock l(mu_);
98       if (call_counter_ >= first_n_) return;
99       call_counter_++;
100     }
101     string msg;
102     strings::StrAppend(&msg, message_);
103     for (int i = 1; i < ctx->num_inputs(); ++i) {
104       strings::StrAppend(&msg, "[", ctx->input(i).SummarizeValue(summarize_),
105                          "]");
106     }
107     std::cerr << msg << std::endl;
108   }
109 
110  private:
111   mutex mu_;
112   int64 call_counter_ TF_GUARDED_BY(mu_) = 0;
113   int64 first_n_ = 0;
114   int32 summarize_ = 0;
115   string message_;
116 };
117 
// The (deprecated) Print op only runs on the CPU.
REGISTER_KERNEL_BUILDER(Name("Print").Device(DEVICE_CPU), PrintOp);
119 
120 class PrintV2Op : public OpKernel {
121  public:
PrintV2Op(OpKernelConstruction * ctx)122   explicit PrintV2Op(OpKernelConstruction* ctx) : OpKernel(ctx) {
123     OP_REQUIRES_OK(ctx, ctx->GetAttr("output_stream", &output_stream_));
124     OP_REQUIRES_OK(ctx, ctx->GetAttr("end", &end_));
125 
126     SetFilePathIfAny();
127     if (!file_path_.empty()) return;
128 
129     auto output_stream_index =
130         std::find(std::begin(valid_output_streams_),
131                   std::end(valid_output_streams_), output_stream_);
132 
133     if (output_stream_index == std::end(valid_output_streams_)) {
134       string error_msg = strings::StrCat(
135           "Unknown output stream: ", output_stream_, ", Valid streams are:");
136       for (auto valid_stream : valid_output_streams_) {
137         strings::StrAppend(&error_msg, " ", valid_stream);
138       }
139       OP_REQUIRES(ctx, false, errors::InvalidArgument(error_msg));
140     }
141   }
142 
Compute(OpKernelContext * ctx)143   void Compute(OpKernelContext* ctx) override {
144     const Tensor* input_;
145     OP_REQUIRES_OK(ctx, ctx->input("input", &input_));
146     OP_REQUIRES(
147         ctx, TensorShapeUtils::IsScalar(input_->shape()),
148         errors::InvalidArgument("Input is expected to be scalar, but got ",
149                                 input_->shape()));
150     const string& msg = input_->scalar<tstring>()();
151 
152     string ended_msg = strings::StrCat(msg, end_);
153 
154     if (!file_path_.empty()) {
155       // Outputs to a file at the specified path.
156       OP_REQUIRES_OK(ctx,
157                      AppendStringToFile(file_path_, ended_msg, ctx->env()));
158       return;
159     }
160 
161     if (logging::LogToListeners(ended_msg, "")) {
162       return;
163     }
164 
165     if (output_stream_ == "stdout") {
166       std::cout << ended_msg << std::flush;
167     } else if (output_stream_ == "stderr") {
168       std::cerr << ended_msg << std::flush;
169     } else if (output_stream_ == "log(info)") {
170       LOG(INFO) << ended_msg << std::flush;
171     } else if (output_stream_ == "log(warning)") {
172       LOG(WARNING) << ended_msg << std::flush;
173     } else if (output_stream_ == "log(error)") {
174       LOG(ERROR) << ended_msg << std::flush;
175     } else {
176       string error_msg = strings::StrCat(
177           "Unknown output stream: ", output_stream_, ", Valid streams are:");
178       for (auto valid_stream : valid_output_streams_) {
179         strings::StrAppend(&error_msg, " ", valid_stream);
180       }
181       strings::StrAppend(&error_msg, ", or file://<filename>");
182       OP_REQUIRES(ctx, false, errors::InvalidArgument(error_msg));
183     }
184   }
185 
186   const char* valid_output_streams_[5] = {"stdout", "stderr", "log(info)",
187                                           "log(warning)", "log(error)"};
188 
189  private:
190   string end_;
191   // Either output_stream_ or file_path_ (but not both) will be non-empty.
192   string output_stream_;
193   string file_path_;
194 
195   // If output_stream_ is a file path, extracts it to file_path_ and clears
196   // output_stream_; otherwise sets file_paths_ to "".
SetFilePathIfAny()197   void SetFilePathIfAny() {
198     if (absl::StartsWith(output_stream_, kOutputStreamEscapeStr)) {
199       file_path_ = output_stream_.substr(strlen(kOutputStreamEscapeStr));
200       output_stream_ = "";
201     } else {
202       file_path_ = "";
203     }
204   }
205 };
206 
// PrintV2 only runs on the CPU.
REGISTER_KERNEL_BUILDER(Name("PrintV2").Device(DEVICE_CPU), PrintV2Op);
208 
209 class TimestampOp : public OpKernel {
210  public:
TimestampOp(OpKernelConstruction * context)211   explicit TimestampOp(OpKernelConstruction* context) : OpKernel(context) {}
212 
Compute(OpKernelContext * context)213   void Compute(OpKernelContext* context) override {
214     TensorShape output_shape;  // Default shape is 0 dim, 1 element
215     Tensor* output_tensor = nullptr;
216     OP_REQUIRES_OK(context,
217                    context->allocate_output(0, output_shape, &output_tensor));
218 
219     auto output_scalar = output_tensor->scalar<double>();
220     double now_us = static_cast<double>(Env::Default()->NowMicros());
221     double now_s = now_us / 1000000;
222     output_scalar() = now_s;
223   }
224 };
225 
// Timestamp only runs on the CPU.
REGISTER_KERNEL_BUILDER(Name("Timestamp").Device(DEVICE_CPU), TimestampOp);
227 
228 }  // end namespace tensorflow
229