Searched refs: n_tensors (Results 1 – 12 of 12) sorted by relevance
42 size_t n_tensors = params.size(); in _fused_adam_kernel_cpu_() local
43 TORCH_CHECK(grads.size() == n_tensors); in _fused_adam_kernel_cpu_()
44 TORCH_CHECK(exp_avgs.size() == n_tensors); in _fused_adam_kernel_cpu_()
45 TORCH_CHECK(exp_avg_sqs.size() == n_tensors); in _fused_adam_kernel_cpu_()
47 TORCH_CHECK(max_exp_avg_sqs.size() == n_tensors); in _fused_adam_kernel_cpu_()
51 TORCH_CHECK(state_steps.size() == n_tensors); in _fused_adam_kernel_cpu_()
53 for (size_t i = 0; i < n_tensors; i++){ in _fused_adam_kernel_cpu_()
118 size_t n_tensors = params.size(); in _fused_adamw_kernel_cpu_() local
119 TORCH_CHECK(grads.size() == n_tensors); in _fused_adamw_kernel_cpu_()
120 TORCH_CHECK(exp_avgs.size() == n_tensors); in _fused_adamw_kernel_cpu_()
[all …]
36 size_t n_tensors = params.size(); in _fused_adagrad_kernel_cpu_() local
37 TORCH_CHECK(grads.size() == n_tensors); in _fused_adagrad_kernel_cpu_()
38 TORCH_CHECK(state_sums.size() == n_tensors); in _fused_adagrad_kernel_cpu_()
39 TORCH_CHECK(state_steps.size() == n_tensors); in _fused_adagrad_kernel_cpu_()
40 for (size_t i = 0; i < n_tensors; i++){ in _fused_adagrad_kernel_cpu_()
38 size_t n_tensors = params.size(); in _fused_sgd_kernel_cpu_() local
39 TORCH_CHECK(grads.size() == n_tensors); in _fused_sgd_kernel_cpu_()
44 TORCH_CHECK(momentum_buffer_list.size() == n_tensors); in _fused_sgd_kernel_cpu_()
46 for (size_t i = 0; i < n_tensors; i++){ in _fused_sgd_kernel_cpu_()
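
The three fused-optimizer results above share one guard pattern: take params.size() as the single source of truth for n_tensors, TORCH_CHECK that every companion list (grads, moment buffers, state steps) has the same length, then update one tensor per iteration. A minimal sketch of that pattern, assuming libtorch; fused_step_sketch and the particular list arguments are illustrative, not the real kernel signatures:

// Sketch only: mirrors the validation pattern of the fused CPU optimizer
// kernels. fused_step_sketch is a hypothetical name, not PyTorch API.
#include <ATen/ATen.h>
#include <c10/util/Exception.h>

void fused_step_sketch(
    at::TensorList params,
    at::TensorList grads,
    at::TensorList exp_avgs) {
  // One n_tensors derived from params; every sibling list must agree.
  size_t n_tensors = params.size();
  TORCH_CHECK(grads.size() == n_tensors);
  TORCH_CHECK(exp_avgs.size() == n_tensors);
  for (size_t i = 0; i < n_tensors; i++) {
    // Per-tensor update goes here; shapes are assumed to correspond.
    TORCH_CHECK(params[i].sizes() == grads[i].sizes());
  }
}
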
72 n_tensors: int,
82 [(torch.rand(*sizes[i]) - 0.5).to(dtype) for i in range(n_tensors)]
91 n_tensors: int,
100 [(torch.rand(*sizes[i]) - 0.5).to(dtype) for i in range(n_tensors)]
143 n_tensors=n_model_inputs,
152 n_tensors=n_model_outputs,
205 n_tensors=n_model_inputs,
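
The Python result above is a test helper that materialises n_tensors random inputs, one per requested shape, centred on zero. A hedged C++ translation of the same factory pattern (make_inputs is a hypothetical name, not code from the test suite):

// Sketch only: make_inputs is a hypothetical C++ analogue of the
// Python helper above, not code from the test suite.
#include <torch/torch.h>
#include <vector>

std::vector<torch::Tensor> make_inputs(
    const std::vector<std::vector<int64_t>>& sizes,
    torch::Dtype dtype) {
  std::vector<torch::Tensor> out;
  out.reserve(sizes.size());
  for (const auto& s : sizes) {
    // rand is uniform on [0, 1); subtracting 0.5 centres it on zero.
    out.push_back((torch::rand(s) - 0.5).to(dtype));
  }
  return out;
}
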
84 int64_t n_tensors = self.size(0); in chunk_nested_tensor() local
103 for (int64_t i : c10::irange(n_tensors)) { in chunk_nested_tensor()
140 int64_t n_tensors = self.size(0); in split_with_sizes_nested() local
158 for (int64_t i : c10::irange(n_tensors)) { in split_with_sizes_nested()
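
In the nested-tensor ops above, self.size(0) of a nested tensor is its number of constituents, and c10::irange drives the per-constituent loop. A sketch of that loop shape, assuming libtorch; chunk_sketch is hypothetical and, unlike the real chunk_nested_tensor, treats self as an ordinary dense tensor:

// Sketch only: chunk_sketch is hypothetical and treats self as a dense
// tensor; the real nested-tensor op works on nested storage instead.
#include <ATen/ATen.h>
#include <c10/util/irange.h>
#include <vector>

std::vector<at::Tensor> chunk_sketch(const at::Tensor& self) {
  int64_t n_tensors = self.size(0); // one constituent per leading index
  std::vector<at::Tensor> out;
  out.reserve(n_tensors);
  for (int64_t i : c10::irange(n_tensors)) {
    out.push_back(self.select(0, i)); // a view per constituent, no copy
  }
  return out;
}
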
134 const size_t n_tensors = tensor_lists[0].size(); in multi_tensor_apply() local
140 for (size_t t = 0; t < n_tensors; t++) { in multi_tensor_apply()
225 const size_t n_tensors = tensor_lists[0].size(); in multi_tensor_apply() local
231 for (size_t t = 0; t < n_tensors; t++) { in multi_tensor_apply()
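
multi_tensor_apply sizes its outer loop from the first tensor list and walks each tensor in fixed-size chunks so that many small tensors can share one kernel launch. A sketch of that iteration shape only; the chunk size and the metadata handling here are assumptions, not the real CUDA implementation:

// Sketch only: illustrates the loop structure, not the real kernel
// launch; kChunkSize and the metadata comment are assumptions.
#include <ATen/ATen.h>
#include <vector>

constexpr int64_t kChunkSize = 65536; // illustrative chunk length

void multi_tensor_apply_sketch(
    const std::vector<std::vector<at::Tensor>>& tensor_lists) {
  // n_tensors comes from the first list; the lists are parallel.
  const size_t n_tensors = tensor_lists[0].size();
  for (size_t t = 0; t < n_tensors; t++) {
    const int64_t numel = tensor_lists[0][t].numel();
    for (int64_t chunk = 0; chunk * kChunkSize < numel; chunk++) {
      // The real code accumulates (list, chunk) metadata here and
      // launches one fused kernel when the metadata block fills up.
    }
  }
}
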
91 const int64_t n_tensors = tensor_strides.size(0); in is_safe_to_get_storage_as_tensor() local
97 if (n_tensors <= 1) { in is_safe_to_get_storage_as_tensor()
123 for (int i{1}; i < n_tensors; i++) { in is_safe_to_get_storage_as_tensor()
142 for (int64_t i = 2; i < n_tensors; i++) { in is_safe_to_get_storage_as_tensor()
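
The stride-safety check above short-circuits when n_tensors <= 1 (nothing to compare) and otherwise walks the stride table from the second row onward. A sketch of that early-out shape, assuming the strides arrive as one row per constituent; strides_consistent_sketch simplifies away the offset arithmetic the real function also checks:

// Sketch only: compares neighbouring stride rows and omits the offset
// checks the real is_safe_to_get_storage_as_tensor also performs.
#include <ATen/ATen.h>
#include <c10/util/irange.h>

bool strides_consistent_sketch(const at::Tensor& tensor_strides) {
  const int64_t n_tensors = tensor_strides.size(0);
  if (n_tensors <= 1) {
    return true; // zero or one constituents: trivially consistent
  }
  for (int64_t i : c10::irange(1, n_tensors)) {
    if (!at::equal(tensor_strides[i], tensor_strides[i - 1])) {
      return false; // rows disagree: storage cannot be viewed as one tensor
    }
  }
  return true;
}
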
167 const int64_t n_tensors = param.size(0); in check_for_seq_len_0_and_consistent_head_dim_nested_tensor_helper() local
171 for (const auto i : c10::irange(n_tensors)) { in check_for_seq_len_0_and_consistent_head_dim_nested_tensor_helper()
743 const int64_t n_tensors = params.query.size(0); in check_for_seq_len_1_nested_tensor() local
747 for (const auto i : c10::irange(n_tensors)) { in check_for_seq_len_1_nested_tensor()
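
Both SDPA helpers above follow the same shape: n_tensors is the number of constituent sequences in a nested-tensor argument, and each constituent is validated in turn. A sketch under the assumption that per-constituent sequence lengths sit in column 0 of a sizes tensor; the function name and that layout are illustrative:

// Sketch only: assumes sequence lengths live in column 0 of a
// per-constituent sizes tensor; name and layout are illustrative.
#include <ATen/ATen.h>
#include <c10/util/irange.h>

bool all_seq_lens_ok_sketch(const at::Tensor& sizes, int64_t min_len) {
  const int64_t n_tensors = sizes.size(0); // one row per constituent
  for (const auto i : c10::irange(n_tensors)) {
    if (sizes[i][0].item<int64_t>() < min_len) {
      return false; // a degenerate sequence disqualifies the whole batch
    }
  }
  return true;
}
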
346 n_tensors = offsets.size(0) - 1
347 if n_tensors <= 1:
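
The Python fragment above uses the offsets convention for jagged layouts: n + 1 offsets delimit n tensors, so n_tensors = offsets.size(0) - 1, and with one tensor or fewer there is no inter-tensor work to do. The same check in C++, as a sketch (needs_inter_tensor_work is a hypothetical name):

// Sketch only: needs_inter_tensor_work is a hypothetical name.
#include <ATen/ATen.h>

bool needs_inter_tensor_work(const at::Tensor& offsets) {
  // e.g. offsets [0, 3, 7] delimit 2 tensors of lengths 3 and 4.
  int64_t n_tensors = offsets.size(0) - 1;
  return n_tensors > 1; // with <= 1 tensors there is nothing to compare
}
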
168 size_t n_scalars = 0, n_tensors = 0; in lint_python() local
173 n_tensors++; in lint_python()
180 AT_ASSERT(n_tensors == inputs().size()); in lint_python()
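
The lint result above tallies inputs by kind and then asserts the tensor count matches the input count, i.e. that every input in that context is a tensor. A standalone sketch of that counting-lint pattern; Kind and lint_inputs_sketch are illustrative stand-ins for the JIT IR types:

// Sketch only: Kind and lint_inputs_sketch are stand-ins for the JIT
// IR types; the assert mirrors the source line above.
#include <c10/util/Exception.h>
#include <cstddef>
#include <vector>

enum class Kind { Scalar, Tensor };

void lint_inputs_sketch(const std::vector<Kind>& inputs) {
  size_t n_scalars = 0, n_tensors = 0;
  for (Kind k : inputs) {
    k == Kind::Tensor ? n_tensors++ : n_scalars++;
  }
  // In the linted context every input is expected to be a tensor,
  // so the tensor tally must equal the total input count.
  AT_ASSERT(n_tensors == inputs.size());
}
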
4043 n_tensors = 100
4045 elems = torch.arange(n_tensors * n_tensor_elems, dtype=torch.float32)
4048 for i in range(0, n_tensors - 1):
4053 for i in range(0, n_tensors - 1):

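The last result is test setup: one torch.arange is carved into n_tensors contiguous slices so adjacent tensors hold consecutive values, and the test then iterates over adjacent pairs (hence the range(0, n_tensors - 1) loops). A C++ sketch of the same construction; the per-tensor element count is an assumption, since the listing elides it:

// Sketch only: n_tensor_elems is assumed, since the listing elides it.
#include <torch/torch.h>
#include <vector>

std::vector<torch::Tensor> make_test_tensors() {
  const int64_t n_tensors = 100;
  const int64_t n_tensor_elems = 16; // assumption, not from the test
  torch::Tensor elems =
      torch::arange(n_tensors * n_tensor_elems, torch::kFloat32);
  std::vector<torch::Tensor> out;
  out.reserve(n_tensors);
  for (int64_t i = 0; i < n_tensors; i++) {
    // Slice i holds values [i * n_tensor_elems, (i + 1) * n_tensor_elems).
    out.push_back(
        elems.slice(0, i * n_tensor_elems, (i + 1) * n_tensor_elems));
  }
  return out;
}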