/external/clang/test/SemaCXX/ |
D | warn-consumed-analysis.cpp |
    83   ConsumableClass<int> var0;
    87   ConsumableClass<int> var4(var0); // copy consumed value
    89   …*var0; // expected-warning {{invalid invocation of method 'operator*' on object 'var0' while it is…
    95   var0 = ConsumableClass<int>(42);
    96   *var0;
    98   var0 = var1;
    99   …*var0; // expected-warning {{invalid invocation of method 'operator*' on object 'var0' while it is…
    101  if (var0.isValid()) {
    102  *var0;
    106  …*var0; // expected-warning {{invalid invocation of method 'operator*' on object 'var0' while it is…
    [all …]
|
/external/tensorflow/tensorflow/python/training/ |
D | optimizer_test.py |
    42   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
    47   return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
    56   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    59   opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
    62   self.assertAllClose([-14., -13.], self.evaluate(var0))
    69   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    71   cost = 5 * var0 + 3 * var1
    77   global_step, [var0, var1],
    83   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    88   self.assertAllClose([-14., -13.], self.evaluate(var0))
    [all …]
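Taken together, these hits show the usual TF1 optimizer-test pattern: build var0/var1, define a loss that depends on them, run one minimize (or apply_gradients) step inside a session, and assert on the updated values. Below is a minimal, hedged reconstruction of that pattern, not the file's exact code; the 3.0 learning rate and var1 initializer are assumptions inferred from the expected [-14., -13.] result (1.0 - 3.0 * 5 = -14.0).

    # Hedged reconstruction of the test pattern suggested by the hits above.
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
    var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)      # initial value assumed
    loss = 5 * var0 + 3 * var1                            # d(sum(loss))/d(var0) == [5, 5]

    sgd_op = tf.train.GradientDescentOptimizer(learning_rate=3.0)  # 3.0 inferred, see above
    opt_op = sgd_op.minimize(loss, var_list=[var0, var1])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(opt_op)               # one step: 1.0 - 3.0 * 5 = -14.0, 2.0 - 15.0 = -13.0
        print(sess.run(var0))          # approximately [-14., -13.]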
|
D | gradient_descent_test.py |
    43   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    49   zip([grads0, grads1], [var0, var1]))
    52   self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
    58   self.evaluate(var0))
    67   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    72   zip([grads0, grads1], [var0, var1]))
    77   resources.initialize_resources([var0, var1]).run()
    79   self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
    85   self.evaluate(var0))
    93   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    [all …]
|
D | momentum_test.py |
    50   var0 = resource_variable_ops.ResourceVariable(
    55   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    67   zip([grads0, grads1], [var0, var1]))
    72   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    77   slot0 = mom_opt.get_slot(var0, "momentum")
    78   self.assertEquals(slot0.get_shape(), var0.get_shape())
    97   self.evaluate(var0))
    103  mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    118  ]), self.evaluate(var0))
    140  var0 = resource_variable_ops.ResourceVariable(
    [all …]
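Several of these hits inspect the optimizer's slot variables rather than the trained weights: once apply_gradients has been built, MomentumOptimizer exposes a per-variable "momentum" accumulator via get_slot, and the test asserts that it has the variable's shape. A rough sketch of that check, assuming the TF1 API (the learning-rate/momentum values and gradients are placeholders, not taken from the file):

    # Hedged sketch of the slot-variable check suggested by the hits above.
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
    var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
    grads0 = tf.constant([0.1, 0.1], dtype=tf.float32)
    grads1 = tf.constant([0.01, 0.01], dtype=tf.float32)

    mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)  # placeholder values
    update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

    # apply_gradients creates one "momentum" slot per variable; it should have
    # the same shape as the variable it accumulates for.
    slot0 = mom_opt.get_slot(var0, "momentum")
    assert slot0.get_shape() == var0.get_shape()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(update)
        print(sess.run([var0, slot0]))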
|
D | proximal_adagrad_test.py |
    40   var0 = variables.Variable([0.0, 0.0])
    49   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    52   v0_val, v1_val = self.evaluate([var0, var1])
    60   v0_val, v1_val = self.evaluate([var0, var1])
    64   self.assertStartsWith(opt_vars[0].name, var0._shared_name)
    79   var0 = variables.Variable([1.0, 2.0])
    89   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    92   v0_val, v1_val = self.evaluate([var0, var1])
    99   v0_val, v1_val = self.evaluate([var0, var1])
    107  var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    [all …]
|
D | adagrad_test.py |
    45   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    48   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    62   zip([grads0, grads1], [var0, var1]))
    66   v0_val, v1_val = self.evaluate([var0, var1])
    75   ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    78   v0_val, v1_val = self.evaluate([var0, var1])
    103  var0 = resource_variable_ops.ResourceVariable(
    106  pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
    112  self.evaluate(var0))
    117  self.evaluate(var0),
    [all …]
|
D | ftrl_test.py |
    44   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
    47   var0 = variables.Variable([0.0, 0.0], dtype=dtype)
    56   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    59   v0_val, v1_val = self.evaluate([var0, var1])
    67   v0_val, v1_val = self.evaluate([var0, var1])
    85   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    95   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    98   v0_val, v1_val = self.evaluate([var0, var1])
    105  v0_val, v1_val = self.evaluate([var0, var1])
    115  var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    [all …]
|
D | proximal_gradient_descent_test.py |
    42   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
    45   var0 = variables.Variable([0.0, 0.0])
    51   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    54   v0_val, v1_val = self.evaluate([var0, var1])
    62   v0_val, v1_val = self.evaluate([var0, var1])
    77   var0 = variables.Variable([1.0, 2.0])
    84   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    87   v0_val, v1_val = self.evaluate([var0, var1])
    95   v0_val, v1_val = self.evaluate([var0, var1])
    103  var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    [all …]
|
D | adagrad_da_test.py |
    41   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
    44   var0 = variables.Variable([0.0, 0.0], dtype=dtype)
    55   zip([grads0, grads1], [var0, var1]), global_step=global_step)
    58   v0_val, v1_val = self.evaluate([var0, var1])
    65   v0_val, v1_val = self.evaluate([var0, var1])
    89   var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    93   pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
    99   self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
    104  self.evaluate(var0),
    112  var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    [all …]
|
D | rmsprop_test.py |
    105  var0 = resource_variable_ops.ResourceVariable(var0_np)
    108  var0 = variables.Variable(var0_np)
    119  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    122  mg0 = opt.get_slot(var0, "mg")
    126  rms0 = opt.get_slot(var0, "rms")
    130  mom0 = opt.get_slot(var0, "momentum")
    143  self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    165  self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
    172  var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    174  pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
    [all …]
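These rmsprop hits read three slots back from the optimizer: "rms", "momentum", and, for the centered variant, "mg" (a moving average of the raw gradient), and then compare var0 against a var0_np copy stepped outside TensorFlow. Below is a hedged NumPy sketch of the centered update those slots correspond to; the default hyperparameters, slot initialization, and exact epsilon placement are assumptions and may differ from the kernel.

    import numpy as np

    def rmsprop_update_numpy(var, g, mg, rms, mom, lr=0.01, decay=0.9,
                             momentum=0.0, epsilon=1e-7, centered=True):
        """One (optionally centered) RMSProp step on NumPy arrays."""
        rms = decay * rms + (1 - decay) * g * g         # "rms" slot
        if centered:
            mg = decay * mg + (1 - decay) * g           # "mg" slot
            denom = rms - mg * mg + epsilon
        else:
            denom = rms + epsilon
        mom = momentum * mom + lr * g / np.sqrt(denom)  # "momentum" slot
        var = var - mom
        return var, mg, rms, mom

    var0_np = np.array([1.0, 2.0], dtype=np.float32)
    mg0 = rms0 = mom0 = np.zeros_like(var0_np)          # slot start values assumed
    g0 = np.array([0.1, 0.1], dtype=np.float32)         # placeholder gradient
    var0_np, mg0, rms0, mom0 = rmsprop_update_numpy(var0_np, g0, mg0, rms0, mom0)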
|
D | adadelta_test.py |
    45   var0 = resource_variable_ops.ResourceVariable(
    50   var0 = variables.Variable(var0_init, dtype=dtype)
    71   zip([grads, grads], [var0, var1]))
    78   self.assertStartsWith(opt_vars[0].name, var0._shared_name)
    79   self.assertStartsWith(opt_vars[1].name, var0._shared_name)
    88   slot[0] = adadelta_opt.get_slot(var0, "accum")
    89   self.assertEquals(slot[0].get_shape(), var0.get_shape())
    92   slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
    93   self.assertEquals(slot_update[0].get_shape(), var0.get_shape())
    105  self.assertAllClose(var0_init, self.evaluate(var0))
    [all …]
|
D | adam_test.py |
    68   var0 = resource_variable_ops.ResourceVariable(var0_np)
    71   var0 = variables.RefVariable(var0_np)
    82   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    86   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    102  self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
    172  var0 = resource_variable_ops.ResourceVariable(
    177  var0 = variables.RefVariable(var0_np)
    193  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    215  self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    225  opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    [all …]
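adam_test.py likewise tracks a var0_np reference alongside var0 and checks them with assertAllCloseAccordingToType after each step. A hedged NumPy sketch of the standard bias-corrected Adam step such a reference typically implements (hyperparameter defaults are assumptions, not read from the file):

    import numpy as np

    def adam_update_numpy(param, grad, t, m, v, alpha=0.001,
                          beta1=0.9, beta2=0.999, epsilon=1e-8):
        """One Adam step on NumPy arrays; t is the 1-based step count."""
        alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)  # bias-corrected step size
        m = beta1 * m + (1 - beta1) * grad                        # first-moment estimate
        v = beta2 * v + (1 - beta2) * grad * grad                 # second-moment estimate
        param = param - alpha_t * m / (np.sqrt(v) + epsilon)
        return param, m, v

    var0_np = np.array([1.0, 2.0], dtype=np.float32)
    m0 = v0 = np.zeros_like(var0_np)
    g0 = np.array([0.1, 0.1], dtype=np.float32)                   # placeholder gradient
    var0_np, m0, v0 = adam_update_numpy(var0_np, g0, t=1, m=m0, v=v0)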
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | ftrl_test.py |
    35   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
    40   return var0, var1, grads0, grads1
    43   var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
    50   ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    53   self.assertAllClose([0.0, 0.0], self.evaluate(var0))
    60   return self.evaluate(var0), self.evaluate(var1)
    63   var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
    65   adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    68   self.assertAllClose([0.0, 0.0], self.evaluate(var0))
    75   return self.evaluate(var0), self.evaluate(var1)
    [all …]
|
D | proximal_gradient_descent_test.py |
    36   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
    42   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    45   self.assertAllClose([0.0, 0.0], self.evaluate(var0))
    52   self.assertAllClose(np.array([-0.9, -1.8]), self.evaluate(var0))
    57   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
    64   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    67   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    74   self.assertAllClose(np.array([0.1, 0.2]), self.evaluate(var0))
    79   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
    86   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    [all …]
|
D | proximal_adagrad_test.py |
    36   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
    45   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    48   self.assertAllClose([0.0, 0.0], self.evaluate(var0))
    56   np.array([-2.60260963, -4.29698515]), self.evaluate(var0))
    60   self.assertStartsWith(opt_vars[0].name, var0._shared_name)
    66   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
    76   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    79   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    85   self.assertAllClose(np.array([-1.60261, -2.296985]), self.evaluate(var0))
    90   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
    [all …]
|
D | adagrad_test.py |
    36   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    42   zip([grads0, grads1], [var0, var1]))
    45   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    53   self.evaluate(var0),
    63   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    70   zip([grads0, grads1], [var0, var1]))
    73   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    81   self.evaluate(var0),
    91   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    99   zip([grads0, grads1], [var0, var1]))
    [all …]
|
D | momentum_test.py |
    45   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    52   zip([grads0, grads1], [var0, var1]))
    56   slot0 = mom_opt.get_slot(var0, "momentum")
    57   self.assertEquals(slot0.get_shape(), var0.get_shape())
    64   self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    77   self.evaluate(var0))
    95   ]), self.evaluate(var0))
    105  var0 = resource_variable_ops.ResourceVariable([0.1, 0.2], dtype=dtype)
    111  cost = 0.4 * var0 * var0 + 0.9 * var1
    116  opt_op = mom_op.minimize(cost, global_step, [var0, var1])
    [all …]
|
D | adagrad_da_test.py |
    39   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
    50   zip([grads0, grads1], [var0, var1]), global_step=global_step)
    53   self.assertAllClose([0.0, 0.0], self.evaluate(var0))
    66   np.array([-0.904534, -1.603567]), self.evaluate(var0))
    75   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    87   zip([grads0, grads1], [var0, var1]), global_step=global_step)
    90   self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
    97   np.array([-0.904534, -1.603567]), self.evaluate(var0))
    106  var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    118  zip([grads0, grads1], [var0, var1]), global_step=global_step)
    [all …]
|
D | adadelta_test.py |
    49   var0 = resource_variable_ops.ResourceVariable(
    65   zip([grads, grads], [var0, var1]))
    68   self.assertStartsWith(opt_vars[0].name, var0._shared_name)
    69   self.assertStartsWith(opt_vars[1].name, var0._shared_name)
    78   slot[0] = adadelta_opt.get_slot(var0, "accum")
    79   self.assertEqual(slot[0].get_shape(), var0.get_shape())
    82   slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
    83   self.assertEqual(slot_update[0].get_shape(), var0.get_shape())
    95   self.assertAllClose(var0_init, self.evaluate(var0))
    130  self.evaluate(var0),
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | gradient_descent_test.py |
    45   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    50   sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    56   self.evaluate(var0))
    61   var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    66   sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    72   sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    75   self.evaluate(var0))
    82   sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    86   self.evaluate(var0))
    119  var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    [all …]
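In the optimizer_v2 variants the same apply_gradients pattern runs against the Keras SGD class, eagerly and without a session. A minimal sketch under that assumption (the learning rate and gradient values are placeholders, not taken from the file):

    import tensorflow as tf

    var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
    var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
    grads0 = tf.constant([0.1, 0.1], dtype=tf.float32)
    grads1 = tf.constant([0.01, 0.01], dtype=tf.float32)

    sgd = tf.keras.optimizers.SGD(learning_rate=3.0)          # placeholder learning rate
    sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))  # one in-place update step

    print(var0.numpy())   # var0 - 3.0 * grads0 = [0.7, 1.7]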
|
D | ftrl_test.py |
    43   var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
    46   var0 = variables.Variable([0.0, 0.0], dtype=dtype)
    55   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    58   v0_val, v1_val = self.evaluate([var0, var1])
    66   v0_val, v1_val = self.evaluate([var0, var1])
    84   var0 = variables.Variable([1.0, 2.0], dtype=dtype)
    94   update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    97   v0_val, v1_val = self.evaluate([var0, var1])
    104  v0_val, v1_val = self.evaluate([var0, var1])
    114  var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
    [all …]
|
D | adagrad_test.py |
    79   var0 = resource_variable_ops.ResourceVariable(var0_np)
    95   zip([grads0, grads1], [var0, var1]))
    99   v0_val, v1_val = self.evaluate([var0, var1])
    108  ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    113  self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
    130  var0 = resource_variable_ops.ResourceVariable(var0_np)
    145  zip([grads0, grads1], [var0, var1]))
    149  v0_val, v1_val = self.evaluate([var0, var1])
    158  ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    164  self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
    [all …]
|
D | rmsprop_test.py |
    114  var0 = resource_variable_ops.ResourceVariable(var0_np, dtype=dtype)
    125  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    129  mg0 = opt.get_slot(var0, "mg")
    136  mom0 = opt.get_slot(var0, "momentum")
    142  rms0 = opt.get_slot(var0, "rms")
    155  self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    178  self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
    188  var0 = resource_variable_ops.ResourceVariable(var0_np)
    206  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    209  rms0 = opt.get_slot(var0, "rms")
    [all …]
|
D | adadelta_test.py |
    50   var0 = resource_variable_ops.ResourceVariable(
    55   var0 = variables.Variable(var0_init, dtype=dtype)
    76   zip([grads, grads], [var0, var1]))
    82   slot[0] = adadelta_opt.get_slot(var0, "accum_grad")
    83   self.assertEqual(slot[0].shape, var0.shape)
    85   slot_update[0] = adadelta_opt.get_slot(var0, "accum_var")
    86   self.assertEqual(slot_update[0].shape, var0.shape)
    95   self.assertAllClose(var0_init, self.evaluate(var0))
    105  adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
    137  self.evaluate(var0),
    [all …]
|
/external/deqp/external/vulkancts/data/vulkan/glsl/440/ |
D | linkage.test |
    23   layout(location = 0) out vec2 var0;
    28   var0 = in0 + in0;
    36   layout(location = 0) in vec2 var0;
    41   vec2 out0 = var0;
    64   layout(location = 0) out vec2 var0;
    69   var0 = in0_3 * in0 + in0;
    77   layout(location = 0) in vec2 var0;
    82   vec2 out0 = var0;
    107  layout(location = 0) out vec3 var0;
    112  var0 = in0 + in0;
    [all …]
|