/third_party/mindspore/tests/syntax/simple_expression/

test_math_ops.py
    37   input_y = -3.2
    38   result1 = input_x + input_y
    40   result2 = add_net(input_x, input_y)
    48   input_y = Tensor(np.zeros(shape=[3])).astype(np.int8)
    49   result1 = input_x + input_y
    51   result2 = add_net(input_x, input_y)
    59   input_y = Tensor(np.zeros(shape=[3])).astype(np.int16)
    60   result1 = input_x + input_y
    62   result2 = add_net(input_x, input_y)
    70   input_y = Tensor(np.zeros(shape=[3])).astype(np.int32)
    [all …]

/third_party/mindspore/tests/st/ops/gpu/

test_squared_difference_op.py
    41   input_y = np.random.uniform(40, 50, (3, 4, 5, 2)).astype(np.float16)
    42   output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
    43   diff = input_x-input_y
    56   input_y = np.random.rand(3, 4, 5, 2).astype(np.float32)
    57   output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
    58   diff = input_x-input_y
    71   input_y = np.random.rand(3, 4, 5, 2).astype(np.int32)
    72   output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
    73   diff = input_x-input_y
    86   input_y = np.random.rand(3, 1, 5, 1).astype(np.int32)
    [all …]

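The SquaredDifference tests above only show the diff = input_x - input_y step; the op itself returns the element-wise square of that difference, with NumPy-style broadcasting (the last case pairs shapes (3, 4, 5, 2) and (3, 1, 5, 1)). A minimal NumPy sketch of the expected value, assuming standard SquaredDifference semantics (the helper name is illustrative, not from the test file):

    import numpy as np

    def squared_difference_expect(input_x, input_y):
        # SquaredDifference: (x - y) ** 2 element-wise, with broadcasting.
        diff = input_x - input_y
        return diff * diff

    input_x = np.random.rand(3, 4, 5, 2).astype(np.float32)
    input_y = np.random.rand(3, 1, 5, 1).astype(np.float32)   # broadcast case from the test
    expect = squared_difference_expect(input_x, input_y)
    assert expect.shape == (3, 4, 5, 2)
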
test_batch_matmul.py
    40   input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
    44   output = net(input_x, input_y)
    62   input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float64)
    66   output = net(input_x, input_y)
    84   input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
    88   output = net(input_x, input_y)
    106  input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
    110  output = net(input_x, input_y)
    128  input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
    132  output = net(input_x, input_y)
    [all …]

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/

select_impl.cu
    23   __global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* … in Select() argument
    25   output[pos] = cond[pos] ? input_x[pos] : input_y[pos]; in Select()
    31   void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output, in CalSelect() argument
    33   Select<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, cond, input_x, input_y, output); in CalSelect()
    37   …alSelect<double>(const size_t size, const bool* cond, const double* input_X, const double* input_y,
    39   …d CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
    41   …lect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y, int* output,
    43   …void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
    45   …elect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
    47   …void CalSelect<bool>(const size_t size, const bool *cond, const bool *input_X, const bool *input_y,

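Line 25 above is the whole kernel body: each output element is taken from input_x where cond is true and from input_y otherwise. A NumPy sketch of the same semantics (reference_select is an illustrative name, not part of the CUDA file):

    import numpy as np

    def reference_select(cond, input_x, input_y):
        # Mirrors: output[pos] = cond[pos] ? input_x[pos] : input_y[pos]
        return np.where(cond, input_x, input_y)

    cond = np.array([True, False, True])
    input_x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    input_y = np.array([10.0, 20.0, 30.0], dtype=np.float32)
    print(reference_select(cond, input_x, input_y))   # [ 1. 20.  3.]
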
loss_with_reduction_impl.cu
    137  …vLossKernel(const int input_size, const int reduction, const T *input_x, const T *input_y, T *loss, in KLDivLossKernel() argument
    142  T denominator = maxT(input_y[i], epsilon); in KLDivLossKernel()
    143  T value = input_y[i] * (logT(denominator) - input_x[i]); in KLDivLossKernel()
    148  T denominator = maxT(input_y[i], epsilon); in KLDivLossKernel()
    149  T value = input_y[i] * (logT(denominator) - input_x[i]); in KLDivLossKernel()
    156  void KLDivLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y, T *… in KLDivLoss() argument
    159  …<<<GET_BLOCKS(input_size), GET_THREADS, 0, stream>>>(input_size, reduction, input_x, input_y, loss, in KLDivLoss()
    176  … KLDivLossGradKernel(const int input_size, const int reduction, const T *input_x, const T *input_y, in KLDivLossGradKernel() argument
    182  T denominator = maxT(input_y[i], epsilon); in KLDivLossGradKernel()
    183  dx[i] = -input_y[i] * dloss[i]; in KLDivLossGradKernel()
    [all …]

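Lines 142-143 and 182-183 show the per-element math: the loss term is input_y * (log(max(input_y, epsilon)) - input_x), i.e. input_x is expected to hold log-probabilities, and the gradient w.r.t. input_x is -input_y * dloss. A NumPy sketch of those two element-wise formulas (the epsilon value and the reduction handling are assumptions; only the element path is visible above):

    import numpy as np

    def kl_div_loss_elements(input_x, input_y, epsilon=1e-6):
        # value = y * (log(max(y, eps)) - x), with x holding log-probabilities
        denominator = np.maximum(input_y, epsilon)
        return input_y * (np.log(denominator) - input_x)

    def kl_div_loss_grad_elements(input_y, dloss):
        # dx[i] = -input_y[i] * dloss[i]
        return -input_y * dloss

    p = np.array([0.2, 0.5, 0.3])            # target distribution (input_y)
    log_q = np.log([0.3, 0.4, 0.3])          # log-probabilities (input_x)
    print(kl_div_loss_elements(log_q, p).sum())        # KL(p || q)
    print(kl_div_loss_grad_elements(p, np.ones_like(p)))
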
loss_with_reduction_impl.cuh
    21   …ryCrossEntropyLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y,
    24   …ossEntropyLossGrad(const int &input_size, const int &reduction, const T *input_x, const T *input_y,
    27   void KLDivLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y, T *…
    30   void KLDivLossGrad(const int &input_size, const int &reduction, const T *input_x, const T *input_y,…

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/

bias_add.cc
    44   auto input_y = inputs[1]; in Expand() local
    46   …input_y = gb.Emit("Reshape", {input_y}, {{"shape", MakeValue(ExpandDimsInferShape(input_y->shape, … in Expand()
    52   …input_y = gb.Emit("Reshape", {input_y}, {{"shape", MakeValue(ExpandDimsInferShape(input_y->shape, … in Expand()
    55   return {gb.Emit("Add", {input_x, input_y})}; in Expand()

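The expander turns BiasAdd into a Reshape of the 1-D bias followed by a plain Add: ExpandDimsInferShape pads input_y's shape with size-1 axes so it broadcasts against input_x. A NumPy sketch of the NCHW case (which axes get padded for other data formats is not visible above and is an assumption here):

    import numpy as np

    def expand_bias_add_nchw(input_x, input_y):
        # Reshape the 1-D bias to (1, C, 1, 1) so it broadcasts over N, H, W,
        # then the expanded graph is just an element-wise Add.
        bias_shape = (1, input_y.shape[0]) + (1,) * (input_x.ndim - 2)
        return input_x + input_y.reshape(bias_shape)

    x = np.zeros((2, 3, 4, 4), dtype=np.float32)        # NCHW feature map
    b = np.array([1.0, 2.0, 3.0], dtype=np.float32)     # per-channel bias
    out = expand_bias_add_nchw(x, b)
    assert out.shape == x.shape and np.allclose(out[:, 1], 2.0)
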
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/

binary_cross_entropy.c
    21   … const float *input_y, const float *weight, float *loss, float *tmp_loss) { in BinaryCrossEntropyLossKernel() argument
    26   …-weight[i] * (input_y[i] * logf(input_x[i] + epsilon) + (1 - input_y[i]) * logf(1 - input_x[i] + e… in BinaryCrossEntropyLossKernel()
    32   …-weight[i] * (input_y[i] * logf(input_x[i] + epsilon) + (1 - input_y[i]) * logf(1 - input_x[i] + e… in BinaryCrossEntropyLossKernel()
    38   …CrossEntropy(const int input_size, const int reduction, const float *input_x, const float *input_y, in BinaryCrossEntropy() argument
    41   BinaryCrossEntropyLossKernel(input_size, reduction, input_x, input_y, weight, loss, tmp_loss); in BinaryCrossEntropy()

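Lines 26 and 32 accumulate the weighted binary cross-entropy term -weight * (y * log(x + eps) + (1 - y) * log(1 - x + eps)) into tmp_loss; the reduction is applied afterwards. A NumPy sketch of the per-element value (the epsilon value and the mean reduction below are assumptions made for illustration):

    import numpy as np

    def binary_cross_entropy_elements(input_x, input_y, weight, epsilon=1e-12):
        # -w * (y * log(x + eps) + (1 - y) * log(1 - x + eps)), element-wise
        loss = input_y * np.log(input_x + epsilon) + (1.0 - input_y) * np.log(1.0 - input_x + epsilon)
        return -weight * loss

    x = np.array([0.9, 0.1, 0.6], dtype=np.float32)   # predicted probabilities (input_x)
    y = np.array([1.0, 0.0, 1.0], dtype=np.float32)   # labels (input_y)
    w = np.ones_like(x)
    print(binary_cross_entropy_elements(x, y, w).mean())   # 'mean' reduction
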
/third_party/mindspore/tests/ut/python/pipeline/parse/

test_call_innetr_net_attr.py
    61   def construct(self, input_x, input_y): argument
    64   return self.inner_net.t + self.inner_net(input_x) - input_y
    72   def construct(self, input_x, input_y): argument
    83   def construct(self, input_x, input_y): argument
    84   return self.grad_all(self.forward_net)(input_x, input_y)
    89   input_y = Tensor(np.ones((2, 3)) * 3)
    93   test_var_net(input_x, input_y)
    96   grad_net(input_x, input_y)
    99   ret = test_const_net(input_x, input_y)
    104  ret = test_outer_net(input_x, input_y)

/third_party/mindspore/tests/st/ops/cpu/

test_batch_matmul.py
    46   input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape((2, 4, 3, 4)), mstype.float32)
    50   output = net(input_x, input_y)
    67   input_y = Tensor(np.arange(2 * 3 * 3 * 4).reshape((2, 3, 3, 4)), mstype.float32)
    71   output = net(input_x, input_y)
    92   input_y = Tensor(np.arange(2 * 3 * 3 * 4).reshape((2, 3, 3, 4)), mstype.float32)
    96   output = net(input_x, input_y)
    117  input_y = Tensor(np.arange(2 * 3 * 4 * 3).reshape((2, 3, 4, 3)), mstype.float32)
    121  output = net(input_x, input_y)
    142  input_y = Tensor(np.arange(2 * 3 * 4 * 3).reshape((2, 3, 4, 3)), mstype.float16)
    146  output = net(input_x, input_y)

test_matmul.py
    44   input_y = Tensor(np.arange(3 * 5).reshape((3, 5)), mstype.float32)
    48   output = net(input_x, input_y)
    58   input_y = Tensor(np.arange(3 * 5).reshape((3, 5)), mstype.float32)
    62   output = net(input_x, input_y)
    75   input_y = Tensor(np.arange(3 * 4).reshape((3, 4)), mstype.float32)
    79   output = net(input_x, input_y)
    90   input_y = Tensor(np.arange(5 * 3).reshape((5, 3)), mstype.float32)
    94   output = net(input_x, input_y)
    105  input_y = Tensor(np.arange(4 * 3).reshape((4, 3)), mstype.float16)
    109  output = net(input_x, input_y)

test_notequal_op.py
    44   input_y = Tensor(np.array([11, 2, 13]).astype(np.int32))
    45   outputs = op_wrapper(input_x, input_y)
    59   input_y = Tensor(np.array([-1, 0, 3]).astype(np.float32))
    60   outputs = op_wrapper(input_x, input_y)

/third_party/mindspore/tests/st/ops/graph_kernel/

test_simplify.py
    68   input_y = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)
    69   input_y = np.abs(input_y) + 3
    71   sub_res = input_y + (-3)
    74   pow_res = input_y * input_y
    82   result = net(Tensor(input_x), Tensor(input_y))
    91   input_y = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)
    92   expect = input_y
    95   result = net(Tensor(input_x), Tensor(input_y))

test_maximum_grad.py
    39   input_y = Tensor(input_y_np)
    41   return input_x, input_y, input_dout
    44   def get_maximum_grad_output(input_x, input_y, input_dout, enable_graph_kernel=False): argument
    47   result = net(input_x, input_y, input_dout)
    52   input_x, input_y, input_dout = gen_data()
    53   result_off = get_maximum_grad_output(input_x, input_y, input_dout, False)
    54   result_on = get_maximum_grad_output(input_x, input_y, input_dout, True)

test_minimum_grad.py
    39   input_y = Tensor(input_y_np)
    41   return input_x, input_y, input_dout
    44   def get_minimum_grad_output(input_x, input_y, input_dout, enable_graph_kernel=False): argument
    47   result = net(input_x, input_y, input_dout)
    52   input_x, input_y, input_dout = gen_data()
    53   result_off = get_minimum_grad_output(input_x, input_y, input_dout, False)
    54   result_on = get_minimum_grad_output(input_x, input_y, input_dout, True)

test_optimize_assign.py
    58   input_y = np.random.normal(0, 1, [2, 2, 2]).astype(np.float32)
    63   result_open_gk = net(Tensor(input_x), Tensor(input_y))
    68   result_close_gk = net_beta(Tensor(input_x), Tensor(input_y))
    76   input_y = np.random.normal(0, 1, [2, 2, 2]).astype(np.float32)
    81   result_open_gk = net(Tensor(input_x), Tensor(input_y))
    86   result_close_gk = net_beta(Tensor(input_x), Tensor(input_y))

test_atomic_add.py
    78   input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
    80   expect = np.sum(input_x * input_y) + input_x
    83   result = net(Tensor(input_x), Tensor(input_y))
    92   input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
    94   expect = np.sum(np.square(input_x + input_y)) + (input_x + input_y)
    97   result = net(Tensor(input_x), Tensor(input_y))

test_user_define.py
    110  input_y = np.random.normal(0, 1, [4, 4]).astype(np.float32)
    113  output = test(Tensor(input_x), Tensor(input_y))
    114  expect = np.matmul(input_x, input_y)
    122  input_y = np.random.normal(0, 1, shape).astype(np.float16)
    125  output = test(Tensor(input_x), Tensor(input_y))
    126  assert np.allclose(input_x + input_y, output.asnumpy(), 0.001, 0.001)

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/

binary_cross_entropy_cpu_kernel.cc
    52   const auto *input_y = reinterpret_cast<T *>(inputs[1]->addr); in LaunchKernel() local
    63   …-weight[i] * (input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] +… in LaunchKernel()
    69   … -(input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] + epsilon))); in LaunchKernel()
    77   …-weight[i] * (input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] +… in LaunchKernel()
    83   … -(input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] + epsilon))); in LaunchKernel()

minimum_cpu_kernel.cc
    84   void MinimumCPUKernel<T>::BroadcastArith(const T *input_x, const T *input_y, T *output) const { in BroadcastArith() argument
    86   MS_EXCEPTION_IF_NULL(input_y); in BroadcastArith()
    96   input_y, output); in BroadcastArith()
    99   BroadcastArithOneScalarOneTensor(input_x, input_y, output); in BroadcastArith()
    101  BroadcastArithTensors(input_x, input_y, output); in BroadcastArith()
    157  … const size_t d6, const T *input_x, const T *input_y, T *output) const { in BroadcastArithKernel() argument
    181  output[pos] = MinimumFunc(input_x[l_index], input_y[r_index]); in BroadcastArithKernel()
    186  void MinimumCPUKernel<T>::BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *o… in BroadcastArithOneScalarOneTensor() argument
    189  output[i] = MinimumFunc(input_x[0], input_y[i]); in BroadcastArithOneScalarOneTensor()
    193  output[i] = MinimumFunc(input_x[i], input_y[0]); in BroadcastArithOneScalarOneTensor()
    [all …]

maximum_cpu_kernel.cc
    84   void MaximumCPUKernel<T>::BroadcastArith(const T *input_x, const T *input_y, T *output) const { in BroadcastArith() argument
    86   MS_EXCEPTION_IF_NULL(input_y); in BroadcastArith()
    96   input_y, output); in BroadcastArith()
    99   BroadcastArithOneScalarOneTensor(input_x, input_y, output); in BroadcastArith()
    101  BroadcastArithTensors(input_x, input_y, output); in BroadcastArith()
    157  … const size_t d6, const T *input_x, const T *input_y, T *output) const { in BroadcastArithKernel() argument
    181  output[pos] = MaximumFunc(input_x[l_index], input_y[r_index]); in BroadcastArithKernel()
    186  void MaximumCPUKernel<T>::BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *o… in BroadcastArithOneScalarOneTensor() argument
    189  output[i] = MaximumFunc(input_x[0], input_y[i]); in BroadcastArithOneScalarOneTensor()
    193  output[i] = MaximumFunc(input_x[i], input_y[0]); in BroadcastArithOneScalarOneTensor()
    [all …]

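minimum_cpu_kernel.cc and maximum_cpu_kernel.cc share the same structure: a fast path when one operand is a scalar, otherwise the explicit multi-dimensional broadcast index arithmetic, applying MinimumFunc/MaximumFunc per output element. The result is what NumPy broadcasting gives directly; a sketch of the equivalent semantics (not of the kernels' index math):

    import numpy as np

    def reference_minimum(input_x, input_y):
        # Element-wise min with broadcasting, covering both the scalar and tensor paths.
        return np.minimum(input_x, input_y)

    def reference_maximum(input_x, input_y):
        return np.maximum(input_x, input_y)

    x = np.arange(6, dtype=np.float32).reshape(2, 3)
    y = np.array([2.5], dtype=np.float32)              # the "one scalar, one tensor" case
    print(reference_minimum(x, y))
    print(reference_maximum(x, y))
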
/third_party/mindspore/mindspore/_extends/graph_kernel/expanders/

tanh_grad.py
    24   input_y, input_dy = self.inputs
    26   const_one = graph_builder.value(input_y.dtype, 1)
    27   double_y = graph_builder.emit('Mul', [input_y, input_y])

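The TanhGrad expander computes the gradient from the forward output: with y = tanh(x), dx = dy * (1 - y * y). The lines above build the constant 1 and the y * y product; the remaining Sub/Mul steps are the standard identity. In NumPy:

    import numpy as np

    def tanh_grad(input_y, input_dy):
        # y = tanh(x)  =>  d tanh / dx = 1 - y^2, so dx = dy * (1 - y * y)
        double_y = input_y * input_y
        return input_dy * (1.0 - double_y)

    x = np.linspace(-2.0, 2.0, 5)
    y = np.tanh(x)
    dy = np.ones_like(y)
    numerical = (np.tanh(x + 1e-6) - np.tanh(x - 1e-6)) / 2e-6   # finite-difference check
    assert np.allclose(tanh_grad(y, dy), numerical, atol=1e-5)
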
sigmoid_grad.py
    24   input_y, dy = self.inputs
    27   const_one = graph_builder.value(input_y.dtype, 1.0)
    28   one_mins_y = graph_builder.emit('Sub', [const_one, input_y])
    29   y_mul_dy = graph_builder.emit('Mul', [input_y, dy])

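SigmoidGrad follows the same pattern: with y = sigmoid(x), dx = y * (1 - y) * dy, which is exactly the Sub/Mul chain visible above (one_mins_y, y_mul_dy, then their product). In NumPy:

    import numpy as np

    def sigmoid_grad(input_y, dy):
        # y = sigmoid(x)  =>  d sigmoid / dx = y * (1 - y), so dx = y * dy * (1 - y)
        one_mins_y = 1.0 - input_y
        y_mul_dy = input_y * dy
        return y_mul_dy * one_mins_y

    x = np.linspace(-3.0, 3.0, 7)
    y = 1.0 / (1.0 + np.exp(-x))
    dy = np.ones_like(y)
    numerical = (1.0 / (1.0 + np.exp(-(x + 1e-6))) - 1.0 / (1.0 + np.exp(-(x - 1e-6)))) / 2e-6
    assert np.allclose(sigmoid_grad(y, dy), numerical, atol=1e-5)
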
maximum_grad.py
    31   input_x, input_y, input_dout = self.inputs
    32   ge_result = graph_builder.emit('GreaterEqual', [input_x, input_y])
    38   reduce_axis_y = MinimumGrad.get_reduce_axis(input_y.shape, dy.shape)
    50   if dy_reduce.shape != input_y.shape:
    51   dy_out = graph_builder.emit('Reshape', [dy_reduce], attrs={'shape': input_y.shape})

minimum_grad.py
    30   input_x, input_y, input_dout = self.inputs
    32   le_result = graph_builder.emit('LessEqual', [input_x, input_y])
    41   reduce_axis_y = self.get_reduce_axis(input_y.shape, dy.shape)
    53   if dy_reduce.shape != input_y.shape:
    54   dy_out = graph_builder.emit('Reshape', [dy_reduce], attrs={'shape': input_y.shape})

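MaximumGrad and MinimumGrad differ only in the comparison op (GreaterEqual vs. LessEqual): the mask routes input_dout to the input that won the element-wise comparison, and when the inputs were broadcast the routed gradient is reduce-summed back to each input's shape (that is what get_reduce_axis and the final Reshape above are for). A NumPy sketch of the maximum case; the reduce-axis helper and the tie handling are assumptions about details not visible above:

    import numpy as np

    def get_reduce_axis(input_shape, grad_shape):
        # Axes along which input_shape was broadcast up to grad_shape.
        pad = len(grad_shape) - len(input_shape)
        padded = (1,) * pad + tuple(input_shape)
        return tuple(i for i in range(len(grad_shape)) if padded[i] == 1 and grad_shape[i] != 1)

    def maximum_grad(input_x, input_y, input_dout):
        mask = input_x >= input_y                      # GreaterEqual; LessEqual for MinimumGrad
        dx = np.where(mask, input_dout, 0.0)
        dy = np.where(mask, 0.0, input_dout)
        # Undo broadcasting: sum over the broadcast axes, then restore each input's shape.
        dx = dx.sum(axis=get_reduce_axis(input_x.shape, input_dout.shape)).reshape(input_x.shape)
        dy = dy.sum(axis=get_reduce_axis(input_y.shape, input_dout.shape)).reshape(input_y.shape)
        return dx, dy

    x = np.random.rand(3, 1, 5, 1).astype(np.float32)
    y = np.random.rand(3, 4, 5, 2).astype(np.float32)
    dout = np.ones((3, 4, 5, 2), dtype=np.float32)
    dx, dy = maximum_grad(x, y, dout)
    assert dx.shape == x.shape and dy.shape == y.shape
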