Lines Matching full:unbind

117 tensors = input.unbind()
214 for t in other.unbind():
309 nested_tensor_list = nested_tensor.unbind()
331 nested_tensor_list = nested_tensor.unbind()
355 nested_tensor_list = nested_tensor.unbind()
364 a1, b1 = nt.unbind()
369 a1, b1 = nt.unbind(0)
375 self.assertEqual(a, nt.unbind(0)[0])
403 _test_fn(lambda x, dim: x.unbind(dim))
405 # _test_fn(lambda x, dim: torch.unbind(x, dim))
666 for nt_ub, nt_copy_ub in zip(nt.unbind(), nt_copy):
681 for nt_ub, nt_copy_ub in zip(nt.unbind(), nt_copy):
686 for nt_ub, nt_copy_ub in zip(nt.unbind(), nt_copy):
693 for nt_ub in nt.unbind():
706 for nt_ub in nt.unbind():
715 for nt_ub in nt.unbind():
732 for nt_ub in nt_like.unbind():
743 output.unbind(), itertools.chain(x.unbind(), y.unbind())
755 output.unbind(), x.unbind(), y.unbind()
767 output.unbind(), x.unbind(), y.unbind()
904 ub_contiguous = nt_contiguous.unbind()
905 ub_noncontiguous = nt_noncontiguous.unbind()
915 ts = list(torch.unbind(t))
922 for t1, t2 in zip(nt.unbind(), nt_to.unbind()):
939 for nt_subresult, t in zip(nt_result.unbind(), ts):
951 for nt_subresult, t in zip(nt_result.unbind(), ts):
973 for nt_subresult, t in zip(nt_result.unbind(), ts):
980 for nt_subresult, t in zip(nt_result.unbind(), ts):
1031 ys = y.unbind()
1043 mask = torch.nested.nested_tensor([m < 0 for m in mask.unbind()])
1045 [t.masked_fill(m, 0) for (t, m) in zip(nt.unbind(), mask.unbind())]
1053 ts = list(torch.unbind(t))
1073 ts = list(torch.unbind(t))
1251 for t, t_res in zip(nt.unbind(), nested_result.unbind()):
1270 for t, t_res in zip(nt.unbind(), nested_result.unbind()):
1421 [t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
1441 [t1 - t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
1453 [t1 + t2 for (t1, t2) in zip(nt.unbind(), t.unbind())]
1456 [t1 * t2 for (t1, t2) in zip(nt.unbind(), t.unbind())]
1483 [t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
1490 ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
1518 ref = torch.nested.nested_tensor([t / scale for t in nt.unbind()])
1526 [t / t2 for (t, t2) in zip(nt.unbind(), nt2.unbind())]
1535 [t.transpose(0, 1) for t in nt.unbind()]
1560 [t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
1572 [t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
1579 ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
1613 ub2 = nt2.unbind()
1689 ub1 = nt1.unbind()
1690 ub2 = nt2.unbind()
1978 t1s = nt1.unbind()
1979 t2s = nt2.unbind()
2582 for nc, c in zip(narrowed.unbind(), nt.unbind()[start:end]):
2650 for q, k, v in zip(query.unbind(), key.unbind(), value.unbind()):
3297 unbound = ntT.unbind()
3313 unbound = splits[1].unbind()
3863 for in_component, out_component in zip(nt.unbind(), output.unbind()):
3866 # dim=2 -> dim=1 after unbind
4337 [func(t, dim=(reduce_dim[0] - 1)).unsqueeze(0) for t in nt.unbind()]
4376 for t in nt.unbind()
4459 for t in nt.unbind()
4601 for t in nt_transposed.unbind()
5230 for n1, n2 in zip(nt.unbind(), ref_nt.unbind()):
5593 list(t.unbind(0)), device=device, dtype=dtype, layout=layout
5659 out = nt.unbind()
5672 out = nt.unbind()
5684 out = nt.unbind()
5703 out = nt.unbind()
5721 out = nt.unbind()
5738 r"unbind\(\): nested tensor offsets and lengths.*",
5739 lambda: nt.unbind(),
5755 out = nt.unbind()
5774 out = nt.unbind()
5798 r"unbind\(\): nested tensor.*out of bounds",
5799 lambda: nt.unbind(),
5816 # TODO: Use this approach when unbind is functional
5817 # unbinded_nt = nt.unbind()
5913 for nt_ub in nt_like.unbind():
5927 for nt_ub in nt_like.unbind():
6297 attn_nt.unbind()[0].unsqueeze(0),
6307 # it is transposed. This is because today we cannot backward through view or unbind a
6373 attn_nts = attn_nt.unbind()
6391 unbound_nt_grads = nt_grad.unbind()
6509 attn_nts = attn_nt.unbind()
6592 input_packed.unbind(), output.transpose(-2, -3).unbind()
7156 a, b, c = nt.unbind()
7162 expected_grad.unbind()[1].add_(1.0)
7273 # clone() on non-contiguous with holes NJTs currently use unbind(), leading to
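
The matches above all exercise torch.Tensor.unbind on nested tensors: unbinding splits a nested tensor into a tuple of its constituent dense tensors along dim 0, and the tests then compare results component-by-component via zip. Below is a minimal sketch of that pattern against the public torch.nested API; it is an illustration for context, not code from the file searched above.

    import torch

    # Build a nested tensor from same-rank tensors of different lengths.
    nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])

    # unbind() returns the constituent tensors along dim 0, mirroring
    # calls like `a1, b1 = nt.unbind()` in the matches above.
    a, b = nt.unbind()
    assert a.shape == (2, 3) and b.shape == (4, 3)

    # The per-component pattern used by the elementwise tests: unbind the
    # operands, apply the op pairwise, and re-nest the expected result.
    ref = torch.nested.nested_tensor(
        [t1 + t2 for (t1, t2) in zip(nt.unbind(), nt.unbind())]
    )

For nested tensors, unbind is only defined along dim 0 (hence the `nt.unbind(0)` variants in the matches), and several matches near lines 5738 and 5798 show it raising for jagged layouts whose offsets/lengths make the components unaddressable.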