
Searched refs:layerwise_parallel (Results 1 – 12 of 12) sorted by relevance

/third_party/mindspore/mindspore/graph_utils/
graph_pattern.py 181 def __init__(self, para_name, default_tensor, requires_grad=False, layerwise_parallel=False): argument
195 self.layerwise_parallel = layerwise_parallel
197 isinstance(layerwise_parallel, bool):
199 self.layerwise_parallel)
202 layerwise_parallel(bool), got : {para_name}, {default_tensor}, \
203 {requires_grad}, {layerwise_parallel}")
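The graph_pattern.py hits show the pattern constructor checking argument types before storing them, and raising with a message that names the expected types. A minimal sketch of that validation; the class name, exception type, and exact wording are assumptions for illustration, not the file's code:

# Sketch of the type validation seen in graph_pattern.py's pattern constructor.
# Class name and exception type are assumptions.
class NewParameterSketch:
    def __init__(self, para_name, default_tensor, requires_grad=False, layerwise_parallel=False):
        if (isinstance(para_name, str) and isinstance(requires_grad, bool)
                and isinstance(layerwise_parallel, bool)):
            self.para_name = para_name
            self.default_tensor = default_tensor
            self.requires_grad = requires_grad
            self.layerwise_parallel = layerwise_parallel
        else:
            raise TypeError(f"Expect para_name(str), default_tensor(Tensor), requires_grad(bool), "
                            f"layerwise_parallel(bool), got: {para_name}, {default_tensor}, "
                            f"{requires_grad}, {layerwise_parallel}")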
/third_party/mindspore/tests/ut/python/nn/
test_parameter.py 28 Parameter(tensor, name="testParameter", requires_grad=True, layerwise_parallel=False)
103 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_bool)
105 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=dat)
107 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=tensor)
109 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_none)
111 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_str)
113 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_int)
115 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_list)
117 Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_tuple)
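These tests construct Parameter with a valid bool and then with non-bool values (tensors, None, strings, ints, lists, tuples), which suggests non-bool layerwise_parallel is rejected. A minimal sketch of the same usage, assuming MindSpore is installed; the exact exception type is an assumption:

import numpy as np
import mindspore
from mindspore import Tensor, Parameter

tensor = Tensor(np.ones((2, 2)), mindspore.float32)

# Valid: layerwise_parallel must be a plain bool.
param = Parameter(tensor, name="testParameter", requires_grad=True, layerwise_parallel=False)

# Non-bool values appear to be rejected (assumption: a TypeError is raised).
try:
    Parameter(tensor, name="badParameter", requires_grad=True, layerwise_parallel="yes")
except TypeError as err:
    print("rejected non-bool layerwise_parallel:", err)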
/third_party/mindspore/mindspore/core/ir/
param_info.h 48 bool layerwise_parallel() const { return layerwise_parallel_; } in layerwise_parallel() function
49 void set_layerwise_parallel(bool layerwise_parallel) { layerwise_parallel_ = layerwise_parallel; } in set_layerwise_parallel() argument
/third_party/mindspore/mindspore/common/
parameter.py 131 Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))
133 …def __init__(self, default_input, name=None, requires_grad=True, layerwise_parallel=False, paralle… argument
139 self.layerwise_parallel = layerwise_parallel
410 def layerwise_parallel(self): member in Parameter
415 return self.param_info.layerwise_parallel
417 @layerwise_parallel.setter
418 def layerwise_parallel(self, value=True): member in Parameter
421 self.param_info.layerwise_parallel = value
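parameter.py exposes layerwise_parallel both as a constructor argument and as a read/write property backed by param_info. A minimal usage sketch, assuming MindSpore is installed:

import numpy as np
import mindspore
from mindspore import Tensor, Parameter

weight = Parameter(Tensor(np.zeros((4, 4)), mindspore.float32),
                   name="weight", requires_grad=True, layerwise_parallel=False)

print(weight.layerwise_parallel)   # False: the value passed at construction
weight.layerwise_parallel = True   # the setter writes through to param_info
print(weight.layerwise_parallel)   # True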
/third_party/mindspore/mindspore/ccsrc/pybind_api/ir/
param_info_py.cc 30 .def_property("layerwise_parallel", &ParamInfo::layerwise_parallel, in __anon2c0500310102()
43 … return py::make_tuple(p.name(), p.requires_grad(), p.layerwise_parallel()); in __anon2c0500310102()
/third_party/mindspore/mindspore/ccsrc/frontend/optimizer/
py_pass.cc 43 bool requires_grad, bool layerwise_parallel);
112 new_para_pattern->layerwise_parallel()); in BuildNewParameter()
200 bool requires_grad, bool layerwise_parallel) { in ReflectParamBackToPython() argument
214 …ect new_parameter = parameter_class(default_tensor, param_name, requires_grad, layerwise_parallel); in ReflectParamBackToPython()
284 new_para_pattern->layerwise_parallel()); in Run()
pattern.h 218 …er(string para_name, tensor::TensorPtr default_tensor, bool requires_grad, bool layerwise_parallel) in NewParameter() argument
219 … : para_name_(para_name), requires_grad_(requires_grad), layerwise_parallel_(layerwise_parallel) { in NewParameter()
232 bool layerwise_parallel() { return layerwise_parallel_; } in layerwise_parallel() function
/third_party/mindspore/tests/ut/python/parallel/
test_optimizer.py 34 self.weight1 = Parameter(Tensor(weight_init1), "loss_weight1", layerwise_parallel=True)
35 self.weight2 = Parameter(Tensor(weight_init2), "loss_weight2", layerwise_parallel=True)
test_broadcast_dict.py 28 self.weight = Parameter(Tensor(weight_init), name="weight", layerwise_parallel=False)
40 self.weight = Parameter(Tensor(weight_init), name="weight", layerwise_parallel=True)
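The parallel tests mark some weights as layerwise-parallel and leave others replicated, which later drives the gradient all-reduce and broadcast decisions in grad_reducer.py and cell.py below. A minimal sketch in the style of those tests; the cell name and shapes are hypothetical:

import numpy as np
from mindspore import Tensor, Parameter, nn

class LossNetSketch(nn.Cell):
    """Hypothetical cell mirroring the tests: one sliced weight, one replicated weight."""
    def __init__(self):
        super().__init__()
        weight_init1 = np.ones((8, 8)).astype(np.float32)
        weight_init2 = np.ones((8, 8)).astype(np.float32)
        # Marked layerwise-parallel: each device keeps its own slice.
        self.weight1 = Parameter(Tensor(weight_init1), "loss_weight1", layerwise_parallel=True)
        # Default (False): treated as a replicated parameter.
        self.weight2 = Parameter(Tensor(weight_init2), "loss_weight2", layerwise_parallel=False)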
/third_party/mindspore/mindspore/train/
serialization.py 1290 layerwise_parallel = sliced_parameters[0].layerwise_parallel
1296 … merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
1303 … merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
1423 split_param.requires_grad, split_param.layerwise_parallel)
1541 layerwise_parallel = merged_param.layerwise_parallel
1542 split_param = Parameter(split_tensor, param_name, requires_grad, layerwise_parallel)
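serialization.py carries requires_grad and layerwise_parallel over from the sliced parameters onto the rebuilt merged (or re-split) Parameter. A minimal sketch of that flag-copying step, assuming all slices share the same flags; the helper name is hypothetical:

from mindspore import Parameter

def rebuild_parameter_sketch(merged_tensor, parameter_name, sliced_parameters):
    """Hypothetical helper: rebuild a merged Parameter, keeping the slices' flags."""
    requires_grad = sliced_parameters[0].requires_grad
    layerwise_parallel = sliced_parameters[0].layerwise_parallel
    return Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)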
/third_party/mindspore/mindspore/nn/wrap/
grad_reducer.py 377 self.allreduce_filter = tuple(x.layerwise_parallel is False for x in parameters)
/third_party/mindspore/mindspore/nn/
cell.py 907 if param.layerwise_parallel is False:
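grad_reducer.py and cell.py both key off the flag in the same way: only parameters with layerwise_parallel set to False take part in gradient all-reduce and parameter broadcast, while layerwise-parallel parameters keep their per-device values. A small self-contained sketch of both checks; the parameter list is hypothetical:

import numpy as np
import mindspore
from mindspore import Tensor, Parameter

parameters = [
    Parameter(Tensor(np.ones(4), mindspore.float32), "sliced", layerwise_parallel=True),
    Parameter(Tensor(np.ones(4), mindspore.float32), "replicated", layerwise_parallel=False),
]

# Same expression as grad_reducer.py: True means "all-reduce this gradient".
allreduce_filter = tuple(x.layerwise_parallel is False for x in parameters)
print(allreduce_filter)  # (False, True)

# Mirror of the cell.py check: collect only non-layerwise-parallel parameters.
to_broadcast = [p for p in parameters if p.layerwise_parallel is False]
print([p.name for p in to_broadcast])  # ['replicated']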