/external/tensorflow/tensorflow/python/keras/engine/input_spec.py
     78  ndim=None,  (argument)
     91  self.ndim = len(shape)
     94  self.ndim = ndim
    106  if self.axes and (self.ndim is not None or self.max_ndim is not None):
    107  max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
    116  ('ndim=' + str(self.ndim)) if self.ndim else '',
    126  'ndim': self.ndim,
    148  if spec.ndim is None and spec.shape is None:
    153  shape = [None] * spec.ndim
    216  if spec.ndim is not None and not spec.allow_last_axis_squeeze:
    [all …]

/external/tensorflow/tensorflow/python/keras/engine/input_spec_test.py
     43  spec = input_spec.InputSpec(ndim=5)
     47  spec = input_spec.InputSpec(ndim=0)
     51  spec = input_spec.InputSpec(ndim=3, axes={1: 3, -1: 2})

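The tests above construct bare InputSpec objects; as a rough, hedged illustration (assuming TensorFlow 2.x; RankCheckedDense is a hypothetical example layer, not part of Keras), a layer typically attaches such a spec so that Keras validates input rank and pinned axis sizes before call() runs:

    import tensorflow as tf
    from tensorflow.keras.layers import InputSpec, Layer

    class RankCheckedDense(Layer):
        def __init__(self, units):
            super().__init__()
            self.units = units
            # Declare up front that inputs must be rank 2.
            self.input_spec = InputSpec(ndim=2)

        def build(self, input_shape):
            last_dim = int(input_shape[-1])
            # Tighten the spec: still rank 2, and axis -1 must stay last_dim.
            self.input_spec = InputSpec(ndim=2, axes={-1: last_dim})
            self.kernel = self.add_weight('kernel', shape=(last_dim, self.units))

        def call(self, inputs):
            return tf.matmul(inputs, self.kernel)

    layer = RankCheckedDense(4)
    print(layer(tf.zeros([3, 5])).shape)   # (3, 4)
    # layer(tf.zeros([3, 2, 5]))           # raises: expected ndim=2, found 3
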
/external/python/pybind11/include/pybind11/buffer_info.h
     20  auto ndim = shape.size();  (in PYBIND11_NAMESPACE_BEGIN(), local)
     21  std::vector<ssize_t> strides(ndim, itemsize);  (in PYBIND11_NAMESPACE_BEGIN())
     22  if (ndim > 0)  (in PYBIND11_NAMESPACE_BEGIN())
     23  for (size_t i = ndim - 1; i > 0; --i)  (in PYBIND11_NAMESPACE_BEGIN())
     30  auto ndim = shape.size();  (in f_strides(), local)
     31  std::vector<ssize_t> strides(ndim, itemsize);  (in f_strides())
     32  for (size_t i = 1; i < ndim; ++i)  (in f_strides())
     45  ssize_t ndim = 0; // Number of dimensions  (member)
     52  buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,
     54  : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),  (in ptr())
    [all …]

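The buffer_info.h matches compute default strides from a shape and an item size. A minimal Python sketch of the same two computations (C order walks from the last axis outward, Fortran order from the first; byte strides assumed):

    def c_strides(shape, itemsize):
        # Row-major: the last axis is itemsize apart; each earlier stride is
        # the next stride times the next extent.
        strides = [itemsize] * len(shape)
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
        return strides

    def f_strides(shape, itemsize):
        # Column-major: the first axis is itemsize apart; each later stride is
        # the previous stride times the previous extent.
        strides = [itemsize] * len(shape)
        for i in range(1, len(shape)):
            strides[i] = strides[i - 1] * shape[i - 1]
        return strides

    print(c_strides([2, 3, 4], 8))  # [96, 32, 8]
    print(f_strides([2, 3, 4], 8))  # [8, 16, 48]
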
/external/python/pybind11/include/pybind11/numpy.h
    405  ssize_t ndim() const { return dims_; }
    414  return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
    578  auto ndim = shape->size();
    579  if (ndim != strides->size())
    595  api.PyArray_Type_, descr.release().ptr(), (int) ndim,
    640  return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
    654  ssize_t ndim() const {
    670  if (dim >= ndim())
    682  if (dim >= ndim())
    719  if ((ssize_t) sizeof...(index) > ndim())
    [all …]

/external/python/cpython3/Objects/memoryobject.c
    262  (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
    267  assert(dest->ndim > 0 && src->ndim > 0);  (in last_dim_is_contiguous())
    270  dest->strides[dest->ndim-1] == dest->itemsize &&  (in last_dim_is_contiguous())
    271  src->strides[src->ndim-1] == src->itemsize);  (in last_dim_is_contiguous())
    305  if (dest->ndim != src->ndim)  (in equiv_shape())
    308  for (i = 0; i < dest->ndim; i++) {  (in equiv_shape())
    368  copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,  (in copy_rec(), argument)
    375  assert(ndim >= 1);  (in copy_rec())
    377  if (ndim == 1) {  (in copy_rec())
    389  copy_rec(shape+1, ndim-1, itemsize,  (in copy_rec())
    [all …]

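The memoryobject.c matches centre on copy_rec(), which peels off one dimension per recursive call and copies item by item once ndim == 1. A simplified Python sketch of that pattern (assuming byte buffers, positive strides, and no suboffsets; the helper name mirrors the C one but the code is illustrative only):

    def copy_rec(shape, itemsize, dest, dstrides, doff, src, sstrides, soff):
        """Copy an ndim-dimensional array of bytes, one dimension per call."""
        ndim = len(shape)
        assert ndim >= 1
        if ndim == 1:
            for i in range(shape[0]):
                d = doff + i * dstrides[0]
                s = soff + i * sstrides[0]
                dest[d:d + itemsize] = src[s:s + itemsize]
            return
        for i in range(shape[0]):
            copy_rec(shape[1:], itemsize,
                     dest, dstrides[1:], doff + i * dstrides[0],
                     src, sstrides[1:], soff + i * sstrides[0])

    # Copy a C-contiguous 2x3 array of 1-byte items into Fortran-ordered storage.
    src = bytearray(b'abcdef')              # logical rows: 'abc' and 'def'
    dst = bytearray(6)
    copy_rec([2, 3], 1, dst, [1, 2], 0, src, [3, 1], 0)
    print(bytes(dst))                       # b'adbecf'
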
/external/tensorflow/tensorflow/python/layers/utils.py
     26  def convert_data_format(data_format, ndim):  (argument)
     28  if ndim == 3:
     30  elif ndim == 4:
     32  elif ndim == 5:
     35  raise ValueError('Input rank not supported:', ndim)
     37  if ndim == 3:
     39  elif ndim == 4:
     41  elif ndim == 5:
     44  raise ValueError('Input rank not supported:', ndim)

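convert_data_format() dispatches on ndim (3, 4 or 5) to pick a format string. A sketch of that mapping, reconstructed from the branch structure above and TensorFlow's usual 'N…C'/'NC…' conventions (the exact return strings are an assumption here, not copied from the file):

    def convert_data_format(data_format, ndim):
        if data_format == 'channels_last':
            if ndim == 3:
                return 'NWC'
            elif ndim == 4:
                return 'NHWC'
            elif ndim == 5:
                return 'NDHWC'
            raise ValueError('Input rank not supported:', ndim)
        elif data_format == 'channels_first':
            if ndim == 3:
                return 'NCW'
            elif ndim == 4:
                return 'NCHW'
            elif ndim == 5:
                return 'NCDHW'
            raise ValueError('Input rank not supported:', ndim)
        raise ValueError('Invalid data_format:', data_format)

    print(convert_data_format('channels_last', 4))   # NHWC
    print(convert_data_format('channels_first', 5))  # NCDHW
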
/external/tensorflow/tensorflow/python/framework/fast_tensor_util.pyx
     10  tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):  (argument)
     22  tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):  (argument)
     30  tensor_proto, np.ndarray[np.float32_t, ndim=1] nparray):  (argument)
     38  tensor_proto, np.ndarray[np.float64_t, ndim=1] nparray):  (argument)
     46  tensor_proto, np.ndarray[np.int32_t, ndim=1] nparray):  (argument)
     53  tensor_proto, np.ndarray[np.uint32_t, ndim=1] nparray):  (argument)
     60  tensor_proto, np.ndarray[np.int64_t, ndim=1] nparray):  (argument)
     67  tensor_proto, np.ndarray[np.uint64_t, ndim=1] nparray):  (argument)
     74  tensor_proto, np.ndarray[np.uint8_t, ndim=1] nparray):  (argument)
     82  tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):  (argument)
    [all …]

/external/python/cpython3/Modules/_testbuffer.c
    154  base->ndim = 1;  (in ndbuf_new())
    267  if (ndbuf->base.ndim == 0)  (in init_flags())
    473  copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,  (in copy_rec(), argument)
    480  assert(ndim >= 1);  (in copy_rec())
    482  if (ndim == 1) {  (in copy_rec())
    506  copy_rec(shape+1, ndim-1, itemsize,  (in copy_rec())
    520  dest->ndim != src->ndim)  (in cmp_structure())
    523  for (i = 0; i < dest->ndim; i++) {  (in cmp_structure())
    541  assert(dest->ndim > 0);  (in copy_buffer())
    549  if ((dest->suboffsets && dest->suboffsets[dest->ndim-1] >= 0) ||  (in copy_buffer())
    [all …]

/external/tensorflow/tensorflow/c/eager/dlpack.cc
    231  int ndim) {  (in IsValidStrideCompactRowMajorData(), argument)
    232  if (ndim >= 1 && stride_arr[ndim - 1] != 1) {  (in IsValidStrideCompactRowMajorData())
    235  for (int i = ndim - 2; i >= 0; --i) {  (in IsValidStrideCompactRowMajorData())
    278  int ndim = tensor->dims();  (in TFE_HandleToDLPack(), local)
    279  dlm_tensor->dl_tensor.ndim = ndim;  (in TFE_HandleToDLPack())
    285  shape_arr->resize(ndim);  (in TFE_HandleToDLPack())
    286  stride_arr->resize(ndim, 1);  (in TFE_HandleToDLPack())
    287  for (int i = 0; i < ndim; i++) {  (in TFE_HandleToDLPack())
    290  for (int i = ndim - 2; i >= 0; --i) {  (in TFE_HandleToDLPack())
    324  int num_dims = dl_tensor->ndim;  (in TFE_HandleFromDLPack())

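IsValidStrideCompactRowMajorData() checks whether DLPack strides describe a compact row-major tensor. A minimal Python sketch of that check (element-count strides, as DLPack uses; this simplified version ignores the size-1 dimension special cases the real code may allow):

    def is_compact_row_major(shape, strides):
        """True iff strides describe a dense row-major layout of `shape`."""
        ndim = len(shape)
        if ndim >= 1 and strides[ndim - 1] != 1:
            return False
        expected = 1
        for i in range(ndim - 2, -1, -1):
            expected *= shape[i + 1]
            if strides[i] != expected:
                return False
        return True

    print(is_compact_row_major([2, 3, 4], [12, 4, 1]))  # True
    print(is_compact_row_major([2, 3, 4], [1, 2, 6]))   # False (column-major)
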
/external/python/cpython2/Objects/memoryobject.c
     11  if (buf->ndim == 0)  (in get_shape0())
     23  if (src->ndim == 1 && src->shape != NULL) {  (in dup_buffer())
     27  if (src->ndim == 1 && src->strides != NULL) {  (in dup_buffer())
    181  if (view->ndim > PY_SSIZE_T_MAX / sizeof(Py_ssize_t)) {  (in _indirect_copy_nd())
    186  indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*view->ndim);  (in _indirect_copy_nd())
    191  for (k=0; k<view->ndim;k++) {  (in _indirect_copy_nd())
    196  for (k=0; k<view->ndim; k++) {  (in _indirect_copy_nd())
    206  func(view->ndim, indices, view->shape);  (in _indirect_copy_nd())
    297  _strided_copy_nd(dest, view->buf, view->ndim, view->shape,  (in PyMemoryView_GetContiguous())
    366  return _IntTupleFromSsizet(self->view.ndim, self->view.shape);  (in memory_shape_get())
    [all …]

/external/tensorflow/tensorflow/python/keras/activations_test.py
     83  x = backend.placeholder(ndim=2)
     91  x = backend.placeholder(ndim=1)
    104  x = backend.placeholder(ndim=2)
    122  x = backend.placeholder(ndim=2)
    133  x = backend.placeholder(ndim=2)
    149  x = backend.placeholder(ndim=2)
    162  x = backend.placeholder(ndim=2)
    170  x = backend.placeholder(ndim=2)
    191  x = backend.placeholder(ndim=2)
    205  x = backend.placeholder(ndim=2)
    [all …]

/external/python/cpython3/Lib/test/test_buffer.py
    263  def strides_from_shape(ndim, shape, itemsize, layout):  (argument)
    266  if ndim == 0:
    270  for i in range(ndim-2, -1, -1):
    274  for i in range(1, ndim):
    320  def getindex(ndim, ind, strides):  (argument)
    323  for i in range(ndim):
    332  ndim = len(shape)
    333  sstrides = strides_from_shape(ndim, shape, 1, 'C')
    334  dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
    337  fr = getindex(ndim, ind, sstrides)
    [all …]

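getindex() in test_buffer.py turns a multi-dimensional index into a flat offset. A small self-contained sketch of that computation (itemsize 1, so the strides count elements):

    def getindex(ndim, ind, strides):
        # Flat offset of a multi-index: dot product of index and strides.
        offset = 0
        for i in range(ndim):
            offset += ind[i] * strides[i]
        return offset

    # With itemsize 1 and C layout for shape (2, 3, 4), strides are (12, 4, 1):
    print(getindex(3, (1, 2, 3), (12, 4, 1)))  # 23, i.e. the last element
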
/external/tensorflow/tensorflow/python/keras/utils/conv_utils.py
     31  def convert_data_format(data_format, ndim):  (argument)
     33  if ndim == 3:
     35  elif ndim == 4:
     37  elif ndim == 5:
     40  raise ValueError('Input rank not supported:', ndim)
     42  if ndim == 3:
     44  elif ndim == 4:
     46  elif ndim == 5:
     49  raise ValueError('Input rank not supported:', ndim)

/external/tensorflow/tensorflow/python/keras/layers/preprocessing/normalization.py
    127  ndim = len(input_shape)
    129  if any(a < 1 - ndim or a >= ndim for a in self.axis):
    132  'ndim: `{}`, axis: {}'.format(ndim, self.axis))
    136  self._keep_axis = sorted([d if d >= 0 else d + ndim for d in self.axis])
    138  self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis]
    141  0 if d in self._keep_axis else 1 for d in range(ndim)
    145  input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)

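The normalization layer resolves negative axes against the input rank, keeps those axes, and reduces over the rest. A self-contained sketch of that bookkeeping (split_axes is a hypothetical helper name; the broadcast shape follows the last two matches above):

    def split_axes(input_shape, axis):
        ndim = len(input_shape)
        if any(a < 1 - ndim or a >= ndim for a in axis):
            raise ValueError('Invalid axis for ndim {}: {}'.format(ndim, axis))
        # Resolve negative axes, then reduce over everything that is not kept.
        keep_axis = sorted(d if d >= 0 else d + ndim for d in axis)
        reduce_axis = [d for d in range(ndim) if d not in keep_axis]
        broadcast_shape = [input_shape[d] if d in keep_axis else 1
                           for d in range(ndim)]
        return keep_axis, reduce_axis, broadcast_shape

    print(split_axes((32, 10, 3), axis=(-1,)))
    # ([2], [0, 1], [1, 1, 3]): keep channels, reduce over batch and time.
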
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_v1.py
    118  ndim = len(input_shape)
    124  sorted(self.axis, key=lambda a: a if a >= 0 else ndim + a))
    126  if any(a < 1 - ndim for a in self.axis) or any(
    127  a >= ndim for a in self.axis):
    132  ' axis: {}'.format(ndim, original_axis))
    219  if values.ndim == 1:
    223  axis_mask = np.ones([values.ndim], dtype=bool)
    236  reduction_axes = tuple(np.arange(values.ndim)[axis_mask])

/external/tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py
    174  if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:
    263  if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:
    282  if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:
    290  is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0
    303  isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values):

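ragged_factory_ops.py uses np.ndim() to detect scalars without requiring list or tuple nesting. A small illustration of that predicate (the helper name is invented for the example):

    import numpy as np

    def is_scalar_value(v):
        # np.ndim() accepts anything array-like, so this catches Python
        # scalars and 0-d numpy values while rejecting nested structures.
        return not isinstance(v, (list, tuple)) and np.ndim(v) == 0

    print(is_scalar_value(3.0))               # True
    print(is_scalar_value(np.float32(3.0)))   # True  (0-d numpy scalar)
    print(is_scalar_value(np.array([1, 2])))  # False (ndim == 1)
    print(is_scalar_value([1, 2]))            # False (nested Python list)
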
/external/tensorflow/tensorflow/compiler/xla/python/dlpack.cc
    290  dt.ndim = buffer->buffer()->on_device_shape().dimensions_size();  (in BufferToDLPackManagedTensor())
    327  if (dlmt->dl_tensor.ndim < 0) {  (in DLPackManagedTensorToBuffer())
    330  dlmt->dl_tensor.ndim);  (in DLPackManagedTensorToBuffer())
    336  reinterpret_cast<int64*>(dlmt->dl_tensor.shape), dlmt->dl_tensor.ndim);  (in DLPackManagedTensorToBuffer())
    345  dlmt->dl_tensor.ndim);  (in DLPackManagedTensorToBuffer())
    348  minor_to_major.resize(dlmt->dl_tensor.ndim);  (in DLPackManagedTensorToBuffer())

/external/tensorflow/tensorflow/python/keras/layers/merge.py
    125  input_ndims = list(map(K.ndim, inputs))
    132  x_ndim = K.ndim(x)
    142  x_ndim = K.ndim(x)
    165  y_ndim = K.ndim(y)
    562  elif K.ndim(mask_i) < K.ndim(input_i):
    690  axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
    697  axes.append(self.axes[i] % K.ndim(inputs[i]))

/external/tensorflow/tensorflow/python/keras/layers/local.py
    156  self.input_spec = InputSpec(ndim=3)
    244  self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
    246  self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
    458  self.input_spec = InputSpec(ndim=4)
    549  self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
    551  self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
    681  ndims = int(mask.ndim / 2)
    735  kernel = make_2d(kernel, split_dim=K.ndim(kernel) // 2)

/external/python/pybind11/tests/test_buffers.py
    124  assert info.ndim == 1
    139  assert info.ndim == 2
    161  assert cinfo.ndim == pyinfo.ndim

/external/python/pybind11/tests/test_numpy_array.cpp
    118  if (r.ndim() != 2) throw std::domain_error("error: ndim != 2");  (in auxiliaries())
    123  l.append(r.ndim());  (in auxiliaries())
    160  sm.def("ndim", [](const arr& a) { return a.ndim(); });  (in TEST_SUBMODULE())
    161  sm.def("shape", [](const arr& a) { return arr(a.ndim(), a.shape()); });  (in TEST_SUBMODULE())
    163  sm.def("strides", [](const arr& a) { return arr(a.ndim(), a.strides()); });  (in TEST_SUBMODULE())
    198  {a.shape(), a.shape() + a.ndim()},  (in TEST_SUBMODULE())
    199  {a.strides(), a.strides() + a.ndim()},  (in TEST_SUBMODULE())
    339  if (r.ndim() != 2) throw std::domain_error("error: ndim != 2");  (in TEST_SUBMODULE())
    347  if (r.ndim() != 3) throw std::domain_error("error: ndim != 3");  (in TEST_SUBMODULE())

/external/tensorflow/tensorflow/python/kernel_tests/fingerprint_op_test.py
     36  self.assertEqual(fingerprint0.ndim, 2)
     42  self.assertEqual(f0.ndim, 2)

/external/OpenCL-CTS/test_conformance/gl/test_images_read_common.cpp
    297  int ndim = 1;  (in test_cl_image_read(), local)
    302  ndim = 1;  (in test_cl_image_read())
    310  ndim = 2;  (in test_cl_image_read())
    317  ndim = 3;  (in test_cl_image_read())
    329  local_range = (size_t*)malloc(sizeof(size_t) * ndim);  (in test_cl_image_read())
    333  local_range = (size_t*)malloc(sizeof(size_t) * ndim);  (in test_cl_image_read())
    337  error = clEnqueueNDRangeKernel( queue, kernel, ndim, NULL, global_range,  (in test_cl_image_read())

/external/tensorflow/tensorflow/compiler/xla/service/gpu/cusolver_rewriter.cc
     56  int ndim = a_shape.dimensions_size();  (in CreateCholesky(), local)
     57  CHECK_GE(ndim, 2);  (in CreateCholesky())
     58  int64 n = a_shape.dimensions(ndim - 1);  (in CreateCholesky())

/external/python/cpython3/Doc/c-api/buffer.rst
    156  .. c:member:: int ndim
    169  An array of :c:type:`Py_ssize_t` of length :c:member:`~Py_buffer.ndim`
    171  ``shape[0] * ... * shape[ndim-1] * itemsize`` MUST be equal to
    182  An array of :c:type:`Py_ssize_t` of length :c:member:`~Py_buffer.ndim`
    194  An array of :c:type:`Py_ssize_t` of length :c:member:`~Py_buffer.ndim`.
    235  :c:member:`~Py_buffer.len`, :c:member:`~Py_buffer.itemsize`, :c:member:`~Py_buffer.ndim`.
    348  :c:member:`~Py_buffer.ndim`, :c:member:`~Py_buffer.shape` and :c:member:`~Py_buffer.strides`.
    350  If ``ndim == 0``, the memory location pointed to by :c:member:`~Py_buffer.buf` is
    370  def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
    384  if ndim <= 0:
    [all …]

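buffer.rst documents Py_buffer.ndim, shape and strides, and its verify_structure() example checks that every element reachable through (shape, strides) stays inside the allocated block. A hedged sketch of that invariant (simplified from memory; the documented example also validates itemsize alignment of the offset and strides):

    def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
        if ndim <= 0:
            # A 0-d buffer has no shape/strides; it is a single item at offset.
            return ndim == 0 and not shape and not strides
        if 0 in shape:
            return True  # empty array: no element is ever dereferenced
        # Lowest and highest byte offsets reachable from `offset`.
        imin = sum(strides[j] * (shape[j] - 1)
                   for j in range(ndim) if strides[j] <= 0)
        imax = sum(strides[j] * (shape[j] - 1)
                   for j in range(ndim) if strides[j] > 0)
        return 0 <= offset + imin and offset + imax + itemsize <= memlen

    # A contiguous 2x3 array of 4-byte items fits exactly in 24 bytes ...
    print(verify_structure(24, 4, 2, [2, 3], [12, 4], 0))  # True
    # ... but the same strides starting at byte 8 would run past the end.
    print(verify_structure(24, 4, 2, [2, 3], [12, 4], 8))  # False
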