/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Must be included first.
#include "tensorflow/python/lib/core/numpy.h"

#include <vector>

#include "tensorflow/c/c_api.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/python/lib/core/bfloat16.h"
#include "tensorflow/python/lib/core/ndarray_tensor_bridge.h"

namespace tensorflow {

// Mutex used to serialize accesses to the cached vector of pointers to python
// arrays to be dereferenced.
static mutex* DelayedDecrefLock() {
  static mutex* decref_lock = new mutex;
  return decref_lock;
}

// Caches pointers to numpy arrays which need to be dereferenced.
static std::vector<void*>* DecrefCache() {
  static std::vector<void*>* decref_cache = new std::vector<void*>;
  return decref_cache;
}

// Destructor passed to TF_NewTensor when it reuses a numpy buffer. Stores a
// pointer to the pyobj in a buffer to be dereferenced later when we're actually
// holding the GIL.
void DelayedNumpyDecref(void* data, size_t len, void* obj) {
  mutex_lock ml(*DelayedDecrefLock());
  DecrefCache()->push_back(obj);
}

// Actually dereferences cached numpy arrays. REQUIRES being called while
// holding the GIL.
void ClearDecrefCache() {
  std::vector<void*> cache_copy;
  {
    mutex_lock ml(*DelayedDecrefLock());
    cache_copy.swap(*DecrefCache());
  }
  for (void* obj : cache_copy) {
    Py_DECREF(reinterpret_cast<PyObject*>(obj));
  }
}

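// Illustrative sketch (not part of this file): a typical caller hands a numpy
// buffer to the C API with DelayedNumpyDecref as the deallocator, then drains
// the cache later from a thread that holds the GIL. The variable names below
// are hypothetical.
//
//   Py_INCREF(py_array);  // keep the buffer alive while the tensor uses it
//   TF_Tensor* tensor =
//       TF_NewTensor(dtype, dims, num_dims, PyArray_DATA(py_array), num_bytes,
//                    &DelayedNumpyDecref, py_array);
//   ...
//   ClearDecrefCache();  // later, while holding the GIL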

// Structure which keeps a reference to a Tensor alive while numpy has a pointer
// to it.
struct TensorReleaser {
  // Python macro to include standard members.
  PyObject_HEAD

      // Destructor responsible for releasing the memory.
      std::function<void()>* destructor;
};

extern PyTypeObject TensorReleaserType;

static void TensorReleaser_dealloc(PyObject* pself) {
  TensorReleaser* self = reinterpret_cast<TensorReleaser*>(pself);
  (*self->destructor)();
  delete self->destructor;
  TensorReleaserType.tp_free(pself);
}

PyTypeObject TensorReleaserType = {
    PyVarObject_HEAD_INIT(nullptr, 0) /* head init */
    "tensorflow_wrapper",             /* tp_name */
    sizeof(TensorReleaser),           /* tp_basicsize */
    0,                                /* tp_itemsize */
    /* methods */
    TensorReleaser_dealloc,      /* tp_dealloc */
    0,                           /* tp_print */
    nullptr,                     /* tp_getattr */
    nullptr,                     /* tp_setattr */
    nullptr,                     /* tp_compare */
    nullptr,                     /* tp_repr */
    nullptr,                     /* tp_as_number */
    nullptr,                     /* tp_as_sequence */
    nullptr,                     /* tp_as_mapping */
    nullptr,                     /* tp_hash */
    nullptr,                     /* tp_call */
    nullptr,                     /* tp_str */
    nullptr,                     /* tp_getattro */
    nullptr,                     /* tp_setattro */
    nullptr,                     /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,          /* tp_flags */
    "Wrapped TensorFlow Tensor", /* tp_doc */
    nullptr,                     /* tp_traverse */
    nullptr,                     /* tp_clear */
    nullptr,                     /* tp_richcompare */
};

Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype,
                                   int* out_pyarray_type) {
  switch (tf_datatype) {
    case TF_HALF:
      *out_pyarray_type = NPY_FLOAT16;
      break;
    case TF_FLOAT:
      *out_pyarray_type = NPY_FLOAT32;
      break;
    case TF_DOUBLE:
      *out_pyarray_type = NPY_FLOAT64;
      break;
    case TF_INT32:
      *out_pyarray_type = NPY_INT32;
      break;
    case TF_UINT32:
      *out_pyarray_type = NPY_UINT32;
      break;
    case TF_UINT8:
      *out_pyarray_type = NPY_UINT8;
      break;
    case TF_UINT16:
      *out_pyarray_type = NPY_UINT16;
      break;
    case TF_INT8:
      *out_pyarray_type = NPY_INT8;
      break;
    case TF_INT16:
      *out_pyarray_type = NPY_INT16;
      break;
    case TF_INT64:
      *out_pyarray_type = NPY_INT64;
      break;
    case TF_UINT64:
      *out_pyarray_type = NPY_UINT64;
      break;
    case TF_BOOL:
      *out_pyarray_type = NPY_BOOL;
      break;
    case TF_COMPLEX64:
      *out_pyarray_type = NPY_COMPLEX64;
      break;
    case TF_COMPLEX128:
      *out_pyarray_type = NPY_COMPLEX128;
      break;
    case TF_STRING:
      *out_pyarray_type = NPY_OBJECT;
      break;
    case TF_RESOURCE:
      *out_pyarray_type = NPY_VOID;
      break;
    // TODO(keveman): These should be changed to NPY_VOID, and the type used for
    // the resulting numpy array should be the custom struct types that we
    // expect for quantized types.
    case TF_QINT8:
      *out_pyarray_type = NPY_INT8;
      break;
    case TF_QUINT8:
      *out_pyarray_type = NPY_UINT8;
      break;
    case TF_QINT16:
      *out_pyarray_type = NPY_INT16;
      break;
    case TF_QUINT16:
      *out_pyarray_type = NPY_UINT16;
      break;
    case TF_QINT32:
      *out_pyarray_type = NPY_INT32;
      break;
    case TF_BFLOAT16:
      *out_pyarray_type = Bfloat16NumpyType();
      break;
    default:
      return errors::Internal("Tensorflow type ", tf_datatype,
                              " not convertible to numpy dtype.");
  }
  return Status::OK();
}

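// Illustrative sketch (not part of this file): mapping a TF_DataType to the
// corresponding numpy type number, e.g. to build an array descriptor. The
// surrounding code is hypothetical and assumes the GIL is held.
//
//   int type_num = -1;
//   Status s = TF_DataType_to_PyArray_TYPE(TF_FLOAT, &type_num);
//   if (s.ok()) {
//     PyArray_Descr* descr = PyArray_DescrFromType(type_num);  // NPY_FLOAT32
//     // ... use descr, e.g. with PyArray_NewFromDescr ...
//   }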

Status ArrayFromMemory(int dim_size, npy_intp* dims, void* data, DataType dtype,
                       std::function<void()> destructor, PyObject** result) {
  if (dtype == DT_STRING || dtype == DT_RESOURCE) {
    return errors::FailedPrecondition(
        "Cannot convert string or resource Tensors.");
  }

  int type_num = -1;
  Status s =
      TF_DataType_to_PyArray_TYPE(static_cast<TF_DataType>(dtype), &type_num);
  if (!s.ok()) {
    return s;
  }

  auto* np_array = reinterpret_cast<PyArrayObject*>(
      PyArray_SimpleNewFromData(dim_size, dims, type_num, data));
  PyArray_CLEARFLAGS(np_array, NPY_ARRAY_OWNDATA);
  if (PyType_Ready(&TensorReleaserType) == -1) {
    return errors::Unknown("Python type initialization failed.");
  }
  auto* releaser = reinterpret_cast<TensorReleaser*>(
      TensorReleaserType.tp_alloc(&TensorReleaserType, 0));
  releaser->destructor = new std::function<void()>(std::move(destructor));
  if (PyArray_SetBaseObject(np_array, reinterpret_cast<PyObject*>(releaser)) ==
      -1) {
    Py_DECREF(releaser);
    return errors::Unknown("Python array refused to use memory.");
  }
  *result = reinterpret_cast<PyObject*>(np_array);
  return Status::OK();
}

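// Illustrative sketch (not part of this file): wrapping a tensor's buffer in a
// numpy array whose lifetime is tied to the tensor via the destructor callback.
// `tensor`, `tensor_data`, and the lambda below are hypothetical; the caller
// must hold the GIL.
//
//   npy_intp dims[] = {2, 3};
//   PyObject* ndarray = nullptr;
//   Status s = ArrayFromMemory(2, dims, tensor_data, DT_FLOAT,
//                              [tensor]() { delete tensor; }, &ndarray);
//   // On success, `ndarray` holds a TensorReleaser as its base object; the
//   // destructor runs when numpy drops its last reference to the array.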

}  // namespace tensorflow