
Searched refs: input_y (Results 1 – 25 of 83), sorted by relevance


/third_party/mindspore/tests/syntax/simple_expression/
test_math_ops.py
37 input_y = -3.2
38 result1 = input_x + input_y
40 result2 = add_net(input_x, input_y)
48 input_y = Tensor(np.zeros(shape=[3])).astype(np.int8)
49 result1 = input_x + input_y
51 result2 = add_net(input_x, input_y)
59 input_y = Tensor(np.zeros(shape=[3])).astype(np.int16)
60 result1 = input_x + input_y
62 result2 = add_net(input_x, input_y)
70 input_y = Tensor(np.zeros(shape=[3])).astype(np.int32)
[all …]
/third_party/mindspore/tests/st/ops/gpu/
test_squared_difference_op.py
41 input_y = np.random.uniform(40, 50, (3, 4, 5, 2)).astype(np.float16)
42 output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
43 diff = input_x-input_y
56 input_y = np.random.rand(3, 4, 5, 2).astype(np.float32)
57 output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
58 diff = input_x-input_y
71 input_y = np.random.rand(3, 4, 5, 2).astype(np.int32)
72 output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
73 diff = input_x-input_y
86 input_y = np.random.rand(3, 1, 5, 1).astype(np.int32)
[all …]
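For orientation, the tests above check the SquaredDifference op against (x - y)² computed in NumPy. A minimal standalone sketch of that reference, under the assumption that input_x is drawn from a range like input_y's (only input_y's initializer is visible in the hits):

```python
import numpy as np

def squared_difference(x, y):
    # NumPy reference: SquaredDifference(x, y) = (x - y) ** 2 elementwise.
    d = x - y
    return d * d

# Shapes taken from the hits above; the range for x is illustrative.
x = np.random.uniform(10, 20, (3, 4, 5, 2)).astype(np.float16)
y = np.random.uniform(40, 50, (3, 4, 5, 2)).astype(np.float16)
expect = squared_difference(x, y)
assert expect.shape == (3, 4, 5, 2)
```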
test_batch_matmul.py
40 input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
44 output = net(input_x, input_y)
62 input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float64)
66 output = net(input_x, input_y)
84 input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)
88 output = net(input_x, input_y)
106 input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
110 output = net(input_x, input_y)
128 input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)
132 output = net(input_x, input_y)
[all …]
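The BatchMatMul tests above use 4-D operands; NumPy's matmul has the same batching semantics (leading axes are batch dimensions, the last two axes are multiplied). A sketch with an assumed input_x shape of (2, 4, 1, 3), since only input_y's shape appears in the hits:

```python
import numpy as np

# Batch matmul reference: (2, 4, 1, 3) @ (2, 4, 3, 4) -> (2, 4, 1, 4).
# The input_x shape is an assumption; the hits above only show input_y.
input_x = np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3).astype(np.float32)
input_y = np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4).astype(np.float32)
output = np.matmul(input_x, input_y)
assert output.shape == (2, 4, 1, 4)
```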
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/
select_impl.cu
23 __global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* … in Select() argument
25 output[pos] = cond[pos] ? input_x[pos] : input_y[pos]; in Select()
31 void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output, in CalSelect() argument
33 Select<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, cond, input_x, input_y, output); in CalSelect()
37 …alSelect<double>(const size_t size, const bool* cond, const double* input_X, const double* input_y,
39 …d CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
41 …lect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y, int* output,
43 …void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
45 …elect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
47 …void CalSelect<bool>(const size_t size, const bool *cond, const bool *input_X, const bool *input_y,
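The Select kernel above is a per-element conditional: output[pos] = cond[pos] ? input_x[pos] : input_y[pos]. np.where expresses the same thing on the host; a minimal sketch:

```python
import numpy as np

# Host-side equivalent of the CUDA Select kernel:
# output[pos] = input_x[pos] if cond[pos] else input_y[pos].
cond = np.array([True, False, True])
input_x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
input_y = np.array([10.0, 20.0, 30.0], dtype=np.float32)
output = np.where(cond, input_x, input_y)
assert (output == np.array([1.0, 20.0, 3.0], dtype=np.float32)).all()
```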
loss_with_reduction_impl.cu
137 …vLossKernel(const int input_size, const int reduction, const T *input_x, const T *input_y, T *loss, in KLDivLossKernel() argument
142 T denominator = maxT(input_y[i], epsilon); in KLDivLossKernel()
143 T value = input_y[i] * (logT(denominator) - input_x[i]); in KLDivLossKernel()
148 T denominator = maxT(input_y[i], epsilon); in KLDivLossKernel()
149 T value = input_y[i] * (logT(denominator) - input_x[i]); in KLDivLossKernel()
156 void KLDivLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y, T *… in KLDivLoss() argument
159 …<<<GET_BLOCKS(input_size), GET_THREADS, 0, stream>>>(input_size, reduction, input_x, input_y, loss, in KLDivLoss()
176 … KLDivLossGradKernel(const int input_size, const int reduction, const T *input_x, const T *input_y, in KLDivLossGradKernel() argument
182 T denominator = maxT(input_y[i], epsilon); in KLDivLossGradKernel()
183 dx[i] = -input_y[i] * dloss[i]; in KLDivLossGradKernel()
[all …]
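Reading off the KLDivLossKernel body above: each element contributes input_y * (log(max(input_y, epsilon)) - input_x), where input_x is expected to hold log-probabilities and epsilon guards against log(0). A NumPy sketch (the eps value below is an assumption; the kernel's epsilon is not shown in the hits):

```python
import numpy as np

def kl_div_terms(x, y, eps=1e-12):
    # Per-element KLDivLoss term as in the kernel; x holds log-probabilities.
    denominator = np.maximum(y, eps)       # clamp the target to avoid log(0)
    return y * (np.log(denominator) - x)

x = np.log(np.array([0.2, 0.3, 0.5]))      # log-probs
y = np.array([0.1, 0.4, 0.5])              # targets
loss = kl_div_terms(x, y).mean()           # 'mean' reduction
```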
loss_with_reduction_impl.cuh
21 …ryCrossEntropyLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y,
24 …ossEntropyLossGrad(const int &input_size, const int &reduction, const T *input_x, const T *input_y,
27 void KLDivLoss(const int &input_size, const int &reduction, const T *input_x, const T *input_y, T *…
30 void KLDivLossGrad(const int &input_size, const int &reduction, const T *input_x, const T *input_y,…
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/
bias_add.cc
44 auto input_y = inputs[1]; in Expand() local
46 input_y = gb.Emit("Reshape", {input_y}, {{"shape", MakeValue(ExpandDimsInferShape(input_y->shape, … in Expand()
52 input_y = gb.Emit("Reshape", {input_y}, {{"shape", MakeValue(ExpandDimsInferShape(input_y->shape, … in Expand()
55 return {gb.Emit("Add", {input_x, input_y})}; in Expand()
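The BiasAdd expander above reshapes the 1-D bias so that a plain Add broadcasts it along the channel axis. A sketch assuming NCHW layout (the expander picks the reshape from the data format; the shapes here are illustrative):

```python
import numpy as np

# BiasAdd expansion sketch, assuming NCHW (illustrative shapes):
input_x = np.zeros((2, 3, 4, 4), dtype=np.float32)      # N, C, H, W
input_y = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # bias of shape (C,)
input_y = input_y.reshape(1, 3, 1, 1)                   # 'Reshape' for broadcasting
output = input_x + input_y                              # 'Add' broadcasts per channel
assert (output[:, 1] == 2.0).all()
```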
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/
binary_cross_entropy.c
21 … const float *input_y, const float *weight, float *loss, float *tmp_loss) { in BinaryCrossEntropyLossKernel() argument
26 …-weight[i] * (input_y[i] * logf(input_x[i] + epsilon) + (1 - input_y[i]) * logf(1 - input_x[i] + e… in BinaryCrossEntropyLossKernel()
32 …-weight[i] * (input_y[i] * logf(input_x[i] + epsilon) + (1 - input_y[i]) * logf(1 - input_x[i] + e… in BinaryCrossEntropyLossKernel()
38 …CrossEntropy(const int input_size, const int reduction, const float *input_x, const float *input_y, in BinaryCrossEntropy() argument
41 BinaryCrossEntropyLossKernel(input_size, reduction, input_x, input_y, weight, loss, tmp_loss); in BinaryCrossEntropy()
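The kernel above computes the standard weighted binary cross entropy term, -w * (y * log(x + eps) + (1 - y) * log(1 - x + eps)). A NumPy sketch (the eps value below is an assumption):

```python
import numpy as np

def bce_terms(x, y, w, eps=1e-12):
    # Per-element weighted BCE, matching the kernel's expression.
    return -w * (y * np.log(x + eps) + (1.0 - y) * np.log(1.0 - x + eps))

x = np.array([0.9, 0.2, 0.6], dtype=np.float32)   # predictions in (0, 1)
y = np.array([1.0, 0.0, 1.0], dtype=np.float32)   # targets
w = np.ones_like(x)                               # per-element weights
loss = bce_terms(x, y, w).mean()                  # 'mean' reduction
```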
/third_party/mindspore/tests/ut/python/pipeline/parse/
test_call_innetr_net_attr.py
61 def construct(self, input_x, input_y): argument
64 return self.inner_net.t + self.inner_net(input_x) - input_y
72 def construct(self, input_x, input_y): argument
83 def construct(self, input_x, input_y): argument
84 return self.grad_all(self.forward_net)(input_x, input_y)
89 input_y = Tensor(np.ones((2, 3)) * 3)
93 test_var_net(input_x, input_y)
96 grad_net(input_x, input_y)
99 ret = test_const_net(input_x, input_y)
104 ret = test_outer_net(input_x, input_y)
/third_party/mindspore/tests/st/ops/cpu/
test_batch_matmul.py
46 input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape((2, 4, 3, 4)), mstype.float32)
50 output = net(input_x, input_y)
67 input_y = Tensor(np.arange(2 * 3 * 3 * 4).reshape((2, 3, 3, 4)), mstype.float32)
71 output = net(input_x, input_y)
92 input_y = Tensor(np.arange(2 * 3 * 3 * 4).reshape((2, 3, 3, 4)), mstype.float32)
96 output = net(input_x, input_y)
117 input_y = Tensor(np.arange(2 * 3 * 4 * 3).reshape((2, 3, 4, 3)), mstype.float32)
121 output = net(input_x, input_y)
142 input_y = Tensor(np.arange(2 * 3 * 4 * 3).reshape((2, 3, 4, 3)), mstype.float16)
146 output = net(input_x, input_y)
test_matmul.py
44 input_y = Tensor(np.arange(3 * 5).reshape((3, 5)), mstype.float32)
48 output = net(input_x, input_y)
58 input_y = Tensor(np.arange(3 * 5).reshape((3, 5)), mstype.float32)
62 output = net(input_x, input_y)
75 input_y = Tensor(np.arange(3 * 4).reshape((3, 4)), mstype.float32)
79 output = net(input_x, input_y)
90 input_y = Tensor(np.arange(5 * 3).reshape((5, 3)), mstype.float32)
94 output = net(input_x, input_y)
105 input_y = Tensor(np.arange(4 * 3).reshape((4, 3)), mstype.float16)
109 output = net(input_x, input_y)
test_notequal_op.py
44 input_y = Tensor(np.array([11, 2, 13]).astype(np.int32))
45 outputs = op_wrapper(input_x, input_y)
59 input_y = Tensor(np.array([-1, 0, 3]).astype(np.float32))
60 outputs = op_wrapper(input_x, input_y)
/third_party/mindspore/tests/st/ops/graph_kernel/
test_simplify.py
68 input_y = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)
69 input_y = np.abs(input_y) + 3
71 sub_res = input_y + (-3)
74 pow_res = input_y * input_y
82 result = net(Tensor(input_x), Tensor(input_y))
91 input_y = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)
92 expect = input_y
95 result = net(Tensor(input_x), Tensor(input_y))
test_maximum_grad.py
39 input_y = Tensor(input_y_np)
41 return input_x, input_y, input_dout
44 def get_maximum_grad_output(input_x, input_y, input_dout, enable_graph_kernel=False): argument
47 result = net(input_x, input_y, input_dout)
52 input_x, input_y, input_dout = gen_data()
53 result_off = get_maximum_grad_output(input_x, input_y, input_dout, False)
54 result_on = get_maximum_grad_output(input_x, input_y, input_dout, True)
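These tests compare MaximumGrad with graph kernel on and off. The gradient they exercise routes dout by an elementwise comparison, consistent with the GreaterEqual select in maximum_grad.py further below. A same-shape NumPy sketch (the broadcast reduction handled by get_reduce_axis in the expander is omitted):

```python
import numpy as np

def maximum_grad(input_x, input_y, input_dout):
    # Gradient of max(x, y): dout flows to x where x >= y, else to y
    # (ties go to x, matching the GreaterEqual select).
    ge = input_x >= input_y
    dx = np.where(ge, input_dout, 0.0)
    dy = np.where(ge, 0.0, input_dout)
    return dx, dy

x = np.array([1.0, 5.0, 3.0])
y = np.array([2.0, 5.0, 1.0])
dx, dy = maximum_grad(x, y, np.ones_like(x))   # dx = [0, 1, 1], dy = [1, 0, 0]
```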
test_minimum_grad.py
39 input_y = Tensor(input_y_np)
41 return input_x, input_y, input_dout
44 def get_minimum_grad_output(input_x, input_y, input_dout, enable_graph_kernel=False): argument
47 result = net(input_x, input_y, input_dout)
52 input_x, input_y, input_dout = gen_data()
53 result_off = get_minimum_grad_output(input_x, input_y, input_dout, False)
54 result_on = get_minimum_grad_output(input_x, input_y, input_dout, True)
test_optimize_assign.py
58 input_y = np.random.normal(0, 1, [2, 2, 2]).astype(np.float32)
63 result_open_gk = net(Tensor(input_x), Tensor(input_y))
68 result_close_gk = net_beta(Tensor(input_x), Tensor(input_y))
76 input_y = np.random.normal(0, 1, [2, 2, 2]).astype(np.float32)
81 result_open_gk = net(Tensor(input_x), Tensor(input_y))
86 result_close_gk = net_beta(Tensor(input_x), Tensor(input_y))
test_atomic_add.py
78 input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
80 expect = np.sum(input_x * input_y) + input_x
83 result = net(Tensor(input_x), Tensor(input_y))
92 input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
94 expect = np.sum(np.square(input_x + input_y)) + (input_x + input_y)
97 result = net(Tensor(input_x), Tensor(input_y))
test_user_define.py
110 input_y = np.random.normal(0, 1, [4, 4]).astype(np.float32)
113 output = test(Tensor(input_x), Tensor(input_y))
114 expect = np.matmul(input_x, input_y)
122 input_y = np.random.normal(0, 1, shape).astype(np.float16)
125 output = test(Tensor(input_x), Tensor(input_y))
126 assert np.allclose(input_x + input_y, output.asnumpy(), 0.001, 0.001)
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
binary_cross_entropy_cpu_kernel.cc
52 const auto *input_y = reinterpret_cast<T *>(inputs[1]->addr); in LaunchKernel() local
63 …-weight[i] * (input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] +… in LaunchKernel()
69 … -(input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] + epsilon))); in LaunchKernel()
77 …-weight[i] * (input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] +… in LaunchKernel()
83 … -(input_y[i] * log(input_x[i] + epsilon) + (one - input_y[i]) * log(one - input_x[i] + epsilon))); in LaunchKernel()
minimum_cpu_kernel.cc
84 void MinimumCPUKernel<T>::BroadcastArith(const T *input_x, const T *input_y, T *output) const { in BroadcastArith() argument
86 MS_EXCEPTION_IF_NULL(input_y); in BroadcastArith()
96 input_y, output); in BroadcastArith()
99 BroadcastArithOneScalarOneTensor(input_x, input_y, output); in BroadcastArith()
101 BroadcastArithTensors(input_x, input_y, output); in BroadcastArith()
157 … const size_t d6, const T *input_x, const T *input_y, T *output) const { in BroadcastArithKernel() argument
181 output[pos] = MinimumFunc(input_x[l_index], input_y[r_index]); in BroadcastArithKernel()
186 void MinimumCPUKernel<T>::BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *o… in BroadcastArithOneScalarOneTensor() argument
189 output[i] = MinimumFunc(input_x[0], input_y[i]); in BroadcastArithOneScalarOneTensor()
193 output[i] = MinimumFunc(input_x[i], input_y[0]); in BroadcastArithOneScalarOneTensor()
[all …]
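The CPU kernel above dispatches among three cases: full tensor-tensor broadcasting (the d0…d6 dimension parameters in BroadcastArithKernel), one scalar with one tensor, and same-shape tensors. On the NumPy side a single call covers all of them through broadcasting; a sketch:

```python
import numpy as np

# np.minimum subsumes the kernel's three dispatch cases via broadcasting:
a = np.arange(6, dtype=np.float32).reshape(2, 3)
same_shape = np.minimum(a, np.flipud(a))                   # BroadcastArithTensors
scalar_case = np.minimum(a, np.float32(2.0))               # OneScalarOneTensor
broadcast = np.minimum(a, np.arange(3, dtype=np.float32))  # (2, 3) vs (3,)
assert broadcast.shape == (2, 3)
```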
maximum_cpu_kernel.cc
84 void MaximumCPUKernel<T>::BroadcastArith(const T *input_x, const T *input_y, T *output) const { in BroadcastArith() argument
86 MS_EXCEPTION_IF_NULL(input_y); in BroadcastArith()
96 input_y, output); in BroadcastArith()
99 BroadcastArithOneScalarOneTensor(input_x, input_y, output); in BroadcastArith()
101 BroadcastArithTensors(input_x, input_y, output); in BroadcastArith()
157 … const size_t d6, const T *input_x, const T *input_y, T *output) const { in BroadcastArithKernel() argument
181 output[pos] = MaximumFunc(input_x[l_index], input_y[r_index]); in BroadcastArithKernel()
186 void MaximumCPUKernel<T>::BroadcastArithOneScalarOneTensor(const T *input_x, const T *input_y, T *o… in BroadcastArithOneScalarOneTensor() argument
189 output[i] = MaximumFunc(input_x[0], input_y[i]); in BroadcastArithOneScalarOneTensor()
193 output[i] = MaximumFunc(input_x[i], input_y[0]); in BroadcastArithOneScalarOneTensor()
[all …]
/third_party/mindspore/mindspore/_extends/graph_kernel/expanders/
tanh_grad.py
24 input_y, input_dy = self.inputs
26 const_one = graph_builder.value(input_y.dtype, 1)
27 double_y = graph_builder.emit('Mul', [input_y, input_y])
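The TanhGrad expander above assembles dx = dy * (1 - y²) out of Mul and Sub nodes, reusing the forward output y instead of recomputing tanh. A NumPy sketch of the same identity:

```python
import numpy as np

def tanh_grad(input_y, input_dy):
    # With y = tanh(u): du = dy * (1 - y * y), built from 'Mul'/'Sub' nodes.
    double_y = input_y * input_y
    return input_dy * (1.0 - double_y)

u = np.linspace(-2.0, 2.0, 5)
y = np.tanh(u)
dx = tanh_grad(y, np.ones_like(y))            # equals 1 - tanh(u)**2 elementwise
assert np.allclose(dx, 1.0 - np.tanh(u) ** 2)
```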
sigmoid_grad.py
24 input_y, dy = self.inputs
27 const_one = graph_builder.value(input_y.dtype, 1.0)
28 one_mins_y = graph_builder.emit('Sub', [const_one, input_y])
29 y_mul_dy = graph_builder.emit('Mul', [input_y, dy])
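Likewise, the SigmoidGrad expander builds dx = dy * y * (1 - y) from the Sub and Mul nodes shown. A NumPy sketch:

```python
import numpy as np

def sigmoid_grad(input_y, dy):
    # With y = sigmoid(u): du = dy * y * (1 - y).
    one_mins_y = 1.0 - input_y                # 'Sub' node
    y_mul_dy = input_y * dy                   # 'Mul' node
    return one_mins_y * y_mul_dy

u = np.linspace(-2.0, 2.0, 5)
y = 1.0 / (1.0 + np.exp(-u))
dx = sigmoid_grad(y, np.ones_like(y))         # equals y * (1 - y) elementwise
assert np.allclose(dx, y * (1.0 - y))
```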
maximum_grad.py
31 input_x, input_y, input_dout = self.inputs
32 ge_result = graph_builder.emit('GreaterEqual', [input_x, input_y])
38 reduce_axis_y = MinimumGrad.get_reduce_axis(input_y.shape, dy.shape)
50 if dy_reduce.shape != input_y.shape:
51 dy_out = graph_builder.emit('Reshape', [dy_reduce], attrs={'shape': input_y.shape})
minimum_grad.py
30 input_x, input_y, input_dout = self.inputs
32 le_result = graph_builder.emit('LessEqual', [input_x, input_y])
41 reduce_axis_y = self.get_reduce_axis(input_y.shape, dy.shape)
53 if dy_reduce.shape != input_y.shape:
54 dy_out = graph_builder.emit('Reshape', [dy_reduce], attrs={'shape': input_y.shape})
