
Searched defs: input_layout (Results 1 – 25 of 49), sorted by relevance

/third_party/mindspore/mindspore-src/source/tests/ut/python/parallel/

test_prompt_flash_attention.py
     29: def generate_inputs(dims, optinal_inputs, input_layout='BSH', sparse_mode=0):  [argument]
     72: def generate_strategy(dp, mp, optinal_inputs, input_layout='BSH', sparse_mode=0, sp=1):  [argument]
    115: …_init__(self, num_heads, scale_value=1.0, pre_tokens=2147483547, next_tokens=0, input_layout='BSH',  [argument]
    143: def test_self_attention_standalone(input_layout):  [argument]
    163: def test_prompt_flash_attention_semi_auto_parallel(input_layout, strategys):  [argument]
    184: def test_prompt_flash_attention_auto_parallel(input_layout, search_mode):  [argument]
    204: def test_prompt_flash_attention_strategy_error(input_layout):  [argument]
    224: def test_prompt_flash_attention_semi_auto_parallel_sparsemode0(input_layout, strategys):  [argument]
    248: def test_prompt_flash_attention_semi_auto_parallel_sparsemode2(input_layout, strategys, if_atten_ma…  [argument]
    272: def test_prompt_flash_attention_semi_auto_parallel_sparsemode3(input_layout, strategys, if_atten_ma…  [argument]
    [all …]

test_fused_infer_attention_score.py
     28: def generate_inputs(dims, optinal_inputs, input_layout='BSH', sparse_mode=0, is_ifa=False):  [argument]
     92: def generate_strategy(dp, mp, optinal_inputs, input_layout='BSH', sparse_mode=0, sp=1, is_ifa=False…  [argument]
    151: …_init__(self, num_heads, scale_value=1.0, pre_tokens=2147483547, next_tokens=0, input_layout='BSH',  [argument]
    189: def test_self_attention_standalone(input_layout):  [argument]
    209: def test_self_attention_standalone_ifa(input_layout):  [argument]
    230: def test_fused_infer_attention_score_semi_auto_parallel(input_layout, strategys):  [argument]
    252: def test_fused_infer_attention_score_semi_auto_parallel_ifa(input_layout, strategys):  [argument]
    273: def test_fused_infer_attention_score_strategy_error(input_layout):  [argument]
    294: def test_fused_infer_attention_score_semi_auto_parallel_sparsemode0(input_layout, strategys):  [argument]
    318: def test_fused_infer_attention_score_semi_auto_parallel_ifa_split_s(input_layout, strategys):  [argument]
    [all …]

test_flash_attention_score_info.py
     38: def generate_inputs(B, N, S, D, input_layout, use_mqa=False, with_real_shift=True, sparse_mode=0):  [argument]
    102: def __init__(self, head_num, keep_prob=0.9, input_layout="BSH", sparse_mode=0, use_mqa=False,  [argument]
    183: def test_self_attention_standalone(keep_prob, input_layout, with_real_shift):  [argument]
    201: def test_self_attention_standalone_with_compressed_mask(input_layout, sparse_mode):  [argument]
    220: def test_flash_attention_semi_auto_parallel(input_layout, use_mqa, with_real_shift):  [argument]
    242: def test_flash_attention_semi_auto_parallel_with_compressed_mask(input_layout, sparse_mode):  [argument]
    263: def test_flash_attention_dp(keep_prob, input_layout, with_real_shift):  [argument]
    282: def test_flash_attention_auto_parallel(keep_prob, input_layout, use_mqa, with_real_shift):  [argument]
    299: def test_flash_attention_with_seq_parallel(input_layout, use_mqa, with_real_shift):  [argument]
    322: def test_flash_attention_compressed_mask_with_seq_parallel(input_layout, sparse_mode):  [argument]
    [all …]

test_incre_flash_attention.py
     38: input_layout,  [argument]
     89: input_layout="BSH",  [argument]
    143: def test_self_attention_standalone(input_layout):  [argument]
    161: def test_incre_flash_attention_semi_auto_parallel(input_layout, strategys):  [argument]
    197: def test_incre_flash_attention_auto_parallel(input_layout, search_mode):  [argument]
    224: def test_incre_flash_attention_strategy_error(input_layout):  [argument]

test_auto_parallel_sapp_flash_attention.py
     28: def __init__(self, input_layout, batch_size, head_num, seq_len, head_size):  [argument]
     98: def generate_inputs_for_layout(input_layout, batch_size, head_num, seq_len, head_size):  [argument]
    102: def get_net_strategies(x, w, attn_mask, input_layout, batch_size, head_num, seq_len, head_size):  [argument]
    119: def get_layout_indexes(input_layout):  [argument]
    133: def check_valid_fa_strategy(input_layout, fa_stra):  [argument]
    160: def run_layout_test(input_layout, mem_coef):  [argument]

test_ring_attention.py
     33: def generate_inputs(B, N, S, D, input_layout, use_mqa=False, with_real_shift=False, sparse_mode=0):  [argument]
     81: def __init__(self, head_num, keep_prob=1.0, input_layout="BSH", sparse_mode=0, use_mqa=False,  [argument]
    138: def test_ring_attention_semi_auto_parallel(input_layout):  [argument]

test_model_flops_cal.py
    284: def generate_inputs(B, N, S, D, input_layout, use_mqa=False, with_real_shift=True, sparse_mode=0):  [argument]
    312: def test_flash_attention_semi_auto_parallel_flops(input_layout, use_mqa, with_real_shift):  [argument]
    340: def __init__(self, head_num, keep_prob=0.9, input_layout="BSH", sparse_mode=0, use_mqa=False,  [argument]

/third_party/mindspore/mindspore-src/source/tests/st/auto_parallel/

test_prompt_flash_attention_info.py
     30: def generate_inputs(dims, optinal_inputs, input_layout='BSH', sparse_mode=0):  [argument]
     87: def generate_strategy(dp, mp, optinal_inputs, input_layout='BSH', sparse_mode=0, sp=1):  [argument]
    122: …_init__(self, num_heads, scale_value=1.0, pre_tokens=2147483547, next_tokens=0, input_layout='BSH',  [argument]
    151: def test_prompt_flash_attention_semi_auto_parallel_sparsemode0(input_layout, strategys):  [argument]

flash_attention_score.py
     29: def __init__(self, head_num, keep_prob=0.9, input_layout="BSH", sparse_mode=0, use_mqa=False,  [argument]
    116: def generate_inputs(B, N1, N2, S1, S2, D, input_layout, dtype, return_tensor=True):  [argument]
    156: def test_flash_attention_score_tnd(mode, dtype, input_layout):  [argument]

/third_party/mindspore/mindspore-src/source/tests/st/ops/

test_ops_flash_attention_score.py
     30: … actual_seq_kvlen=None, keep_prob=0.9, input_layout='BSH', pre_tokens=65536,  [argument]
     93: …input_layout='TND', real_shift=None, drop_mask=None, padding_mask=None,  [argument]
    100: def generate_inputs(B, N1, N2, S1, S2, D, input_layout, dtype, return_tensor=True):  [argument]
    272: def test_ops_flash_attention_score_dynamic(input_layout):  [argument]

/third_party/mindspore/mindspore-src/source/tests/st/ops/ascend/

test_fused_infer_attention_score.py
     32: … pre_tokens=2147483547, next_tokens=0, input_layout='BSH', num_key_value_heads=0, sparse_mode=0,  [argument]
     43: def __init__(self, num_heads, input_layout, scale_value, num_key_value_heads):  [argument]
     62: def __init__(self, num_heads, input_layout='BSH', scale_value=1.0, num_key_value_heads=0,  [argument]

test_incre_flash_attention.py
    157: input_layout="BSH",  [argument]
    390: input_layout,  [argument]

test_prompt_flash_attention.py
     31: … pre_tokens=2147483547, next_tokens=0, input_layout='BSH', num_key_value_heads=0, sparse_mode=0):  [argument]

/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/kernel/pyboost/customize/

flash_attention_score.cc
     50: const Int64ImmPtr next_tokens, const Int64ImmPtr inner_precise, const Int64ImmPtr input_layout,  [in FlashAttentionScoreAscendCall()]
    105: const Int64ImmPtr inner_precise, const Int64ImmPtr input_layout, const Int64ImmPtr sparse_mode) {  [in FlashAttentionScoreAscendCustomize()]

flash_attention_score_grad.cc
     39: const Int64ImmPtr inner_precise, const Int64ImmPtr input_layout, const Int64ImmPtr sparse_mode,  [in FlashAttentionScoreGradAscendCall()]
     93: const Int64ImmPtr inner_precise, const Int64ImmPtr input_layout, const Int64ImmPtr sparse_mode) {  [in FlashAttentionScoreGradAscendCustomize()]

/third_party/mindspore/mindspore-src/source/mindspore/core/ops/symbol_ops_impl/

flash_attention_score.cc
     45: …auto input_layout = b->GetInputValue(mindspore::ops::kFlashAttentionScoreInputLayoutIndex)->as_spt…  [local in FlashAttentionScoreShapeBuilder()]

/third_party/mindspore/mindspore-src/source/tests/st/ops/ascend/test_acl_ops/

test_incre_flash_attention.py
     27: def __init__(self, num_heads, input_layout, scale_value, num_key_value_heads):  [argument]

/third_party/mindspore/mindspore-src/source/mindspore/core/ops/ops_func_impl/

fused_infer_attention_score.cc
     53: auto input_layout = input_layout_opt.value();  [local in InferShape()]

flash_attention_score.cc
    131: std::vector<int64_t> GetFASInfoFromInputLayout(int64_t input_layout, int64_t q_head_num, const std:…  [in GetFASInfoFromInputLayout()]
    222: auto input_layout = input_layout_opt.value();  [local in InferShape()]

/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/kernel/opapi/aclnn/

flash_attention_score_grad_aclnn_kernel.h
     90: auto input_layout = inputs[kIndex21];  [local in DEFINE_GET_WORKSPACE_FOR_RESIZE()]

flash_attention_score_aclnn_kernel.cc
     61: auto input_layout = inputs[kIndex16];  [local in GetWorkSpaceInfo()]

flash_attention_score_grad_aclnn_kernel.cc
     61: auto input_layout = inputs[kIndex21];  [local in GetWorkSpaceInfo()]

flash_attention_score_aclnn_kernel.h
     90: auto input_layout = inputs[kIndex16];  [local in DEFINE_GET_WORKSPACE_FOR_RESIZE()]

/third_party/mindspore/mindspore-src/source/mindspore/ccsrc/frontend/parallel/ops_info/

stand_alone_info.cc
     74: TensorLayout input_layout;  [local in InferTensorInfo()]

reshape_info.h
     50: void SetInputLayout(const TensorLayout &input_layout) {  [in SetInputLayout()]
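
Almost every Python hit above follows the same pattern: a test helper that takes an input_layout argument defaulting to 'BSH' and derives the query/key/value shapes from the layout string. A minimal sketch of that pattern, for orientation only (the shape rules and fixed seed are assumptions for illustration; this generate_inputs is not the actual MindSpore test helper):

    import numpy as np

    # Layout letters follow the FlashAttention convention:
    # B=batch, S=sequence, N=num_heads, D=head_dim, H=hidden (N * D).
    def generate_inputs(B, N, S, D, input_layout='BSH'):
        if input_layout == 'BSH':      # (batch, seq, hidden)
            shape = (B, S, N * D)
        elif input_layout == 'BNSD':   # (batch, heads, seq, head_dim)
            shape = (B, N, S, D)
        elif input_layout == 'SBH':    # (seq, batch, hidden)
            shape = (S, B, N * D)
        else:
            raise ValueError(f"unsupported input_layout: {input_layout}")
        rng = np.random.default_rng(0)  # fixed seed: an assumption, for reproducibility
        query, key, value = (rng.standard_normal(shape, dtype=np.float32) for _ in range(3))
        return query, key, value

    for layout in ('BSH', 'BNSD', 'SBH'):
        q, _, _ = generate_inputs(B=2, N=4, S=16, D=32, input_layout=layout)
        print(layout, q.shape)

The layout matters to the parallel tests because it determines which tensor axes a sharding strategy (the strategys parameters above) can split, which is why nearly every test case is parametrized over input_layout.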
