Cross-reference hits for the symbol `ndarray` under external/, one file per entry; each hit shows its source line number and the matched text.

/external/executorch/backends/cadence/runtime/utils.py
     17: def distance(fn: Callable[[np.ndarray, np.ndarray], float]) -> Callable[
     31:     a: Union[np.ndarray, torch.Tensor],
     33:     b: Union[np.ndarray, torch.Tensor],
     72: def rms(a: np.ndarray, b: np.ndarray) -> float:
     78: def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
     84: def max_rel_diff(x: np.ndarray, x_ref: np.ndarray) -> float:
     89: def to_np_arr_fp64(x: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
    100:     predicted: Union[np.ndarray, torch.Tensor],
    102:     ground_truth: Union[np.ndarray, torch.Tensor],
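The `rms`, `max_abs_diff`, and `max_rel_diff` signatures suggest standard error metrics for comparing a result array against a reference. A minimal sketch assuming the conventional definitions; the actual bodies in utils.py may differ, e.g. in how they guard against division by zero:

```python
import numpy as np

def rms(a: np.ndarray, b: np.ndarray) -> float:
    # Root-mean-square of the elementwise difference.
    return float(np.sqrt(np.mean((a - b) ** 2)))

def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
    # Largest absolute elementwise difference.
    return float(np.max(np.abs(a - b)))

def max_rel_diff(x: np.ndarray, x_ref: np.ndarray) -> float:
    # Largest difference relative to the reference; the epsilon guard
    # against division by zero is an assumption, not taken from the source.
    eps = np.finfo(np.float64).eps
    return float(np.max(np.abs(x - x_ref) / (np.abs(x_ref) + eps)))
```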
/external/executorch/backends/cadence/runtime/runtime.py
    227: def to_nd_array(v: Union[bool, numbers.Number, ndarray, torch.Tensor]) -> np.ndarray:  (argument)
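A converter with this signature plausibly just normalizes scalars, arrays, and tensors to NumPy. A hypothetical sketch; the `detach().cpu()` handling of torch tensors is an assumption:

```python
import numbers
from typing import Union

import numpy as np
import torch

def to_nd_array(v: Union[bool, numbers.Number, np.ndarray, torch.Tensor]) -> np.ndarray:
    # Torch tensors need an explicit round-trip off the autograd graph and
    # device; bools, numbers, and ndarrays all pass through np.asarray.
    if isinstance(v, torch.Tensor):
        return v.detach().cpu().numpy()
    return np.asarray(v)
```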
/external/libopus/dnn/torch/weight-exchange/wexchange/c_export/common.py
    196:                        weight : np.ndarray,
    197:                        bias : np.ndarray,
    198:                        scale : np.ndarray = None,
    263:                       weight : np.ndarray,
    264:                       bias : np.ndarray,
    281:                        weight : np.ndarray,
    282:                        bias : np.ndarray,
    306:                        weight : np.ndarray,
    307:                        bias : np.ndarray,
    330:                     weight : np.ndarray,
    [all …]
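The repeated `weight`/`bias` (plus optional `scale`) parameters fit per-layer C-export helpers. A hypothetical sketch of such a helper; the name `print_c_array` and the output format are assumptions, not taken from common.py:

```python
import numpy as np

def print_c_array(name: str, w: np.ndarray) -> str:
    # Flatten a weight tensor into a C float-array initializer, the kind
    # of artifact a weight-exchange c_export module would emit.
    body = ", ".join(f"{v:.6f}f" for v in w.flatten())
    return f"static const float {name}[{w.size}] = {{{body}}};"

print(print_c_array("dense_bias", np.zeros(4, dtype=np.float32)))
```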
/external/armnn/python/pyarmnn/examples/common/cv_utils.py
     15: def preprocess(frame: np.ndarray, input_data_type, input_data_shape: tuple, is_normalised: bool,
     55: def resize_with_aspect_ratio(frame: np.ndarray, input_data_shape: tuple):
    153: def draw_bounding_boxes(frame: np.ndarray, detections: list, resize_factor, labels: dict):
    199: def crop_bounding_box_object(input_frame: np.ndarray, x_min: float, y_min: float, x_max: float, y_m…
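A minimal sketch of what a `resize_with_aspect_ratio` with this signature might do, assuming an OpenCV-based pipeline and an NHWC `input_data_shape`; the real helper in cv_utils.py may also pad the result:

```python
import cv2
import numpy as np

def resize_with_aspect_ratio(frame: np.ndarray, input_data_shape: tuple) -> np.ndarray:
    # Assumes input_data_shape is NHWC, e.g. (1, height, width, 3).
    target_h, target_w = input_data_shape[1], input_data_shape[2]
    h, w = frame.shape[:2]
    scale = min(target_h / h, target_w / w)      # fit inside the target box
    new_size = (int(w * scale), int(h * scale))  # cv2.resize takes (w, h)
    return cv2.resize(frame, new_size, interpolation=cv2.INTER_LINEAR)
```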
/external/tensorflow/tensorflow/lite/tools/optimize/debugging/python/debugger.py
     62:                                                      Callable[[np.ndarray],
     65:                    str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray]],
    135:                    [], Iterable[Sequence[np.ndarray]]]] = None,
    430:                          tensor_data: Sequence[np.ndarray],
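These fragments describe the quantization debugger's metric hooks: per-layer metrics map one `np.ndarray` to a float, model-level metrics compare two sequences of output arrays, and the debug dataset is a zero-argument callable yielding input batches. A sketch of callables with those shapes; the metric names here are assumptions:

```python
from typing import Sequence

import numpy as np

def mean_abs_error(diffs: np.ndarray) -> float:
    # Per-layer metric: one array of quantization errors -> scalar.
    return float(np.mean(np.abs(diffs)))

def max_output_gap(float_outputs: Sequence[np.ndarray],
                   quant_outputs: Sequence[np.ndarray]) -> float:
    # Model-level metric: worst pairwise gap between float and quantized outputs.
    return max(float(np.max(np.abs(f - q)))
               for f, q in zip(float_outputs, quant_outputs))
```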
/external/tensorflow/tensorflow/python/tpu/device_assignment.py
     65:   def __init__(self, topology: Topology, core_assignment: np.ndarray):
    175:             computation_shape: Optional[np.ndarray] = None,
    176:             computation_stride: Optional[np.ndarray] = None,
    334:     computation_shape: Optional[np.ndarray] = None,
    335:     computation_stride: Optional[np.ndarray] = None,
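`computation_shape` and `computation_stride` are small integer vectors over the TPU topology axes. A hypothetical call through the public `tf.tpu.experimental.DeviceAssignment.build` wrapper, assuming a 4D topology laid out as (x, y, z, core); the values are illustrative only:

```python
import numpy as np
import tensorflow as tf

# Requires a reachable TPU; initialize_tpu_system returns the Topology.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)

device_assignment = tf.tpu.experimental.DeviceAssignment.build(
    topology,
    computation_shape=np.array([1, 1, 1, 2]),   # one replica spans 2 cores
    computation_stride=np.array([1, 1, 1, 1]),
    num_replicas=1,
)
```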
/external/armnn/python/pyarmnn/examples/object_detection/style_transfer.py
     11: def style_transfer_postprocess(preprocessed_frame: np.ndarray, image_shape: tuple):
     33: def create_stylized_detection(style_transfer_executor, style_transfer_class, frame: np.ndarray,
     73:                  style_image: np.ndarray, backends: list, delegate_path: str):
/external/armnn/python/pyarmnn/examples/object_detection/ssd.py
     12: def ssd_processing(output: np.ndarray, confidence_threshold=0.60):
/external/armnn/python/pyarmnn/examples/object_detection/yolo.py
     47: def yolo_processing(output: np.ndarray, confidence_threshold=0.40, iou_threshold=0.40):
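Both `ssd_processing` and `yolo_processing` take a raw network output plus thresholds, which points at the usual confidence-filter / NMS pattern. A minimal confidence-filter sketch, assuming `output` is laid out as rows of `[x_min, y_min, x_max, y_max, score, class_id]` (the layout is an assumption; the real examples decode each model's native format and apply non-max suppression):

```python
import numpy as np

def filter_detections(output: np.ndarray, confidence_threshold: float = 0.60) -> np.ndarray:
    # Keep only rows whose score column clears the threshold.
    return output[output[:, 4] >= confidence_threshold]
```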
/external/armnn/python/pyarmnn/src/pyarmnn/_tensor/const_tensor.py
    118:     def __check_size(data: np.ndarray, num_bytes: int, num_elements: int):
    141:     def __create_memory_area(self, data_type: int, num_bytes: int, num_elements: int, data: np.ndarray):
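A sketch of what a size guard like `__check_size` plausibly verifies, assuming it compares the NumPy buffer against the tensor's declared byte and element counts; the exact error messages are assumptions:

```python
import numpy as np

def check_size(data: np.ndarray, num_bytes: int, num_elements: int) -> None:
    # The buffer backing a ConstTensor must match the tensor info exactly.
    if data.nbytes != num_bytes:
        raise ValueError(f"buffer has {data.nbytes} bytes, expected {num_bytes}")
    if data.size != num_elements:
        raise ValueError(f"buffer has {data.size} elements, expected {num_elements}")
```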
/external/armnn/python/pyarmnn/src/pyarmnn/_tensor/workload_tensors.py
     17:                        input_data: List[np.ndarray]) -> List[Tuple[int, ConstTensor]]:
/external/tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py
     48: ndarray = ops.Tensor  (variable)
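This single assignment is why `tf.experimental.numpy` arrays are plain `tf.Tensor` objects rather than a wrapper class. A quick check:

```python
import tensorflow as tf
import tensorflow.experimental.numpy as tnp

x = tnp.asarray([1.0, 2.0, 3.0])
print(isinstance(x, tf.Tensor))  # True: tnp's ndarray is an alias of tf.Tensor
```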
/external/armnn/python/pyarmnn/examples/keyword_spotting/audio_utils.py
      9: def decode(model_output: np.ndarray, labels: dict) -> list:
/external/tensorflow/tensorflow/compiler/xla/python_api/xla_literal.py
     60: def _ConvertNumpyArrayToLiteral(ndarray):  (argument)

/external/tensorflow/tensorflow/compiler/xla/python_api/xla_shape.py
     96: def _CreateShapeFromNumpy(ndarray):  # pylint: disable=invalid-name  (argument)
/external/armnn/python/pyarmnn/examples/speech_recognition/audio_utils.py
      9: def decode(model_output: np.ndarray, labels: dict) -> str:
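The keyword-spotting `decode` above returns a `list` while this speech-recognition variant returns a `str`, but both map network scores to labels. A minimal argmax-style sketch, assuming `labels` maps class indices to names; the real helpers' return shapes differ as noted:

```python
import numpy as np

def decode(model_output: np.ndarray, labels: dict) -> str:
    # Pick the top-scoring class and look up its label.
    top = int(np.argmax(model_output))
    return labels[top]
```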
/external/tensorflow/tensorflow/python/compiler/tensorrt/model_tests/model_handler.py
    176:               output_tensors: Sequence[np.ndarray],
    299:           inputs: Optional[Mapping[str, np.ndarray]] = None,
    424:               calibration_inputs: Optional[Mapping[str, np.ndarray]] = None,
    471:               calibration_inputs: Optional[Mapping[str, np.ndarray]] = None,
    492:           inputs: Optional[Mapping[str, np.ndarray]] = None,
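The `Mapping[str, np.ndarray]` parameters are feeds keyed by input tensor name, the form INT8 calibration data usually takes. An illustrative feed; the input name and shape are assumptions:

```python
import numpy as np

# Keyed by the model's input signature names; one representative batch.
calibration_inputs = {
    "input_0": np.random.default_rng(0).random((8, 224, 224, 3), dtype=np.float32),
}
```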
/external/pytorch/torch/testing/_internal/opinfo/utils.py
    209:     def wrapper(x: np.ndarray, *args, **kwargs):
/external/tensorflow/tensorflow/dtensor/python/layout.py
     79:                global_device_ids: np.ndarray,
    697:   def unravel(self, unpacked_tensors: List[np.ndarray]) -> np.ndarray:
/external/federated-compute/fcp/artifact_building/checkpoint_utils.py
    459:         np.ndarray,
/external/python/cpython3/Lib/test/test_capi/test_number.py
     15:     ndarray = None  (variable)
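A module-level `ndarray = None` in a CPython test is the usual fallback when the optional `_testbuffer` extension is unavailable; presumably the assignment sits in a block like this, so buffer-protocol tests can be skipped:

```python
try:
    from _testbuffer import ndarray  # C test helper shipped with CPython
except ImportError:
    ndarray = None  # tests guard on this and skip when the module is missing
```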
/external/tensorflow/tensorflow/java/src/main/native/tensor_jni.cc
    190:     jobjectArray ndarray = static_cast<jobjectArray>(src);  in writeNDArray()  (local)
    209:     jobjectArray ndarray = static_cast<jobjectArray>(dst);  in readNDArray()  (local)
/external/tensorflow/tensorflow/lite/java/src/main/native/tensor_jni.cc
    293:     jobjectArray ndarray = static_cast<jobjectArray>(dst);  in ReadMultiDimensionalArray()  (local)
    348:     jobjectArray ndarray = static_cast<jobjectArray>(src);  in WriteMultiDimensionalArray()  (local)
/external/tensorflow/tensorflow/python/lib/core/ndarray_tensor.cc
    479: Status NdarrayToTensor(TFE_Context* ctx, PyObject* ndarray,  in NdarrayToTensor()
/external/toolchain-utils/cros_utils/tabulator.py
     78:     sample: Union[np.ndarray, list], baseline: Union[np.ndarray, list]
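Accepting `Union[np.ndarray, list]` for both arguments usually means the function normalizes to an array before computing its statistic. A minimal sketch of that pattern; the statistic shown is illustrative, not tabulator.py's actual one:

```python
from typing import Union

import numpy as np

def relative_mean(sample: Union[np.ndarray, list],
                  baseline: Union[np.ndarray, list]) -> float:
    # Normalize both inputs to float ndarrays, then compare their means.
    s = np.asarray(sample, dtype=float)
    b = np.asarray(baseline, dtype=float)
    return float(s.mean() / b.mean())
```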