| /third_party/rust/rust/src/tools/rust-analyzer/.github/workflows/ |
| D | autopublish.yaml | 25 - name: Install cargo-workspaces 26 run: cargo install cargo-workspaces 38 cargo workspaces rename --from base-db base_db 39 cargo workspaces rename --from hir-def hir_def 40 cargo workspaces rename --from hir-expand hir_expand 41 cargo workspaces rename --from hir-ty hir_ty 42 cargo workspaces rename --from ide-assists ide_assists 43 cargo workspaces rename --from ide-completion ide_completion 44 cargo workspaces rename --from ide-db ide_db 45 cargo workspaces rename --from ide-diagnostics ide_diagnostics [all …]
|
| D | publish-libs.yaml | 23 - name: Install cargo-workspaces 24 run: cargo install cargo-workspaces 33 # Remove r-a crates from the workspaces so we don't auto-publish them as well 35 cargo workspaces publish --yes --exact --from-git --no-git-commit --allow-dirty
|
| /third_party/rust/rust/src/tools/rust-analyzer/crates/rust-analyzer/src/ |
| D | reload.rs | 149 for ws in self.workspaces.iter() { in current_status() 177 message.push_str("Failed to load workspaces.\n\n"); in current_status() 187 tracing::info!(%cause, "will fetch workspaces"); in fetch_workspaces() 206 let mut workspaces = linked_projects in fetch_workspaces() localVariable 228 while i < workspaces.len() { in fetch_workspaces() 229 if let Ok(w) = &workspaces[i] { in fetch_workspaces() 230 let dupes: Vec<_> = workspaces in fetch_workspaces() 239 _ = workspaces.remove(d); in fetch_workspaces() 246 workspaces.push(project_model::ProjectWorkspace::load_detached_files( in fetch_workspaces() 252 tracing::info!("did fetch workspaces {:?}", workspaces); in fetch_workspaces() [all …]
|
| D | global_state.rs | 89 /// `workspaces` field stores the data we actually use, while the `OpQueue` 114 pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>, field 134 pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>, field 200 workspaces: Arc::new(Vec::new()), in new() 341 workspaces: Arc::clone(&self.workspaces), in snapshot() 464 self.workspaces.iter().find_map(|ws| match ws { in cargo_target_for_crate_root()
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/cpu/kernel/ |
| D | topk_cpu_kernel.cc | 33 const std::vector<KernelTensor *> &workspaces, in LaunchKernel() argument 47 size_t *workspace = GetDeviceAddress<size_t>(workspaces, 0); in LaunchKernel() 124 const std::vector<kernel::KernelTensor *> &workspaces, in Launch() argument 129 LaunchKernel<float16>(inputs, workspaces, outputs); in Launch() 131 LaunchKernel<float>(inputs, workspaces, outputs); in Launch() 133 LaunchKernel<int>(inputs, workspaces, outputs); in Launch() 135 LaunchKernel<uint32_t>(inputs, workspaces, outputs); in Launch() 137 LaunchKernel<int8_t>(inputs, workspaces, outputs); in Launch() 139 LaunchKernel<uint8_t>(inputs, workspaces, outputs); in Launch() 141 LaunchKernel<int16_t>(inputs, workspaces, outputs); in Launch() [all …]
|
| D | fused_ada_factor_cpu_kernel.cc | 126 const std::vector<KernelTensor *> &workspaces) const { in FactorUpdate() 131 auto r_factor = reinterpret_cast<float *>(workspaces[kWorkSpaceRFactorIndex]->device_ptr()); in FactorUpdate() 132 auto c_factor = reinterpret_cast<float *>(workspaces[kWorkSpaceCFactorIndex]->device_ptr()); in FactorUpdate() 205 const std::vector<KernelTensor *> &workspaces, in LaunchKernel() argument 217 auto update = reinterpret_cast<float *>(workspaces[kWorkSpaceUpdateIndex]->device_ptr()); in LaunchKernel() 245 FactorUpdate<T>(update, inputs, workspaces); in LaunchKernel() 288 const std::vector<kernel::KernelTensor *> &workspaces, in Launch() argument 300 CheckWorkspaceAddresses(workspaces); in Launch() 302 LaunchKernel<float16>(inputs, workspaces, outputs); in Launch() 304 LaunchKernel<float>(inputs, workspaces, outputs); in Launch() [all …]
|
| D | fused_ada_factor_cpu_kernel.h | 36 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, 45 void CheckWorkspaceAddresses(const std::vector<KernelTensor *> &workspaces) const; 48 …nchKernel(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, 56 const std::vector<KernelTensor *> &workspaces) const;
|
| D | uniform_candidate_sampler_cpu_kernel.h | 47 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, in Launch() argument 49 return kernel_func_(this, inputs, workspaces, outputs); in Launch() 71 …nchKernel(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces,
|
| D | topk_cpu_kernel.h | 38 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, 46 …nchKernel(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces,
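
The CPU-kernel hits above (topk, fused_ada_factor, uniform_candidate_sampler) share one calling convention: `Launch` takes `inputs`, `workspaces`, and `outputs` as `std::vector<KernelTensor *>`, dispatches on the input dtype, and each typed `LaunchKernel<T>` pulls its scratch buffers out of `workspaces` by slot index via `GetDeviceAddress<T>`. A minimal self-contained sketch of that shape follows; the class name, dtype switch, and one-slot workspace layout are assumptions, and only the `KernelTensor`/`GetDeviceAddress` names come from the snippets.

```cpp
// Sketch only: mirrors the Launch/LaunchKernel/workspaces pattern from
// topk_cpu_kernel.cc; class name, dtype switch, and workspace layout are assumed.
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  void *ptr = nullptr;
};

// Assumed helper with the same role as MindSpore's GetDeviceAddress<T>(addrs, i).
template <typename T>
T *GetDeviceAddress(const std::vector<KernelTensor *> &tensors, std::size_t index) {
  return index < tensors.size() ? static_cast<T *>(tensors[index]->device_ptr()) : nullptr;
}

class DemoTopKCpuKernel {
 public:
  bool Launch(const std::vector<KernelTensor *> &inputs,
              const std::vector<KernelTensor *> &workspaces,
              const std::vector<KernelTensor *> &outputs, int dtype_id) {
    // Dtype dispatch, like the LaunchKernel<float16>/<float>/<int>/... chain above.
    switch (dtype_id) {
      case 0: return LaunchKernel<float>(inputs, workspaces, outputs);
      case 1: return LaunchKernel<std::int32_t>(inputs, workspaces, outputs);
      default: return false;
    }
  }

 private:
  template <typename T>
  bool LaunchKernel(const std::vector<KernelTensor *> &inputs,
                    const std::vector<KernelTensor *> &workspaces,
                    const std::vector<KernelTensor *> &outputs) {
    // Workspace slot 0 is an index scratch buffer, as in topk_cpu_kernel.cc.
    std::size_t *idx_scratch = GetDeviceAddress<std::size_t>(workspaces, 0);
    (void)inputs;
    (void)outputs;
    return idx_scratch != nullptr;
  }
};

}  // namespace sketch
```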
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/nn/ |
| D | lamb_gpu_kernel.h | 47 // workspaces param index 65 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, in Launch() argument 81 float *update = GetDeviceAddress<float>(workspaces, kUpdateIndex); in Launch() 82 float *var_float = GetDeviceAddress<float>(workspaces, kVarFloatIndex); in Launch() 83 float *grad_float = GetDeviceAddress<float>(workspaces, kGradFloatIndex); in Launch() 84 float *g_hat_var = GetDeviceAddress<float>(workspaces, kGHatValIndex); in Launch() 91 CalcTrustRatio(workspaces, var_float, grad_float, g_hat_var, stream_ptr, &trust_ratio); in Launch() 93 float *trust_ratio_ptr = GetDeviceAddress<float>(workspaces, kTrustRatioIndex); in Launch() 264 …void CalcTrustRatio(const std::vector<KernelTensor *> &workspaces, float *var_float, float *grad_f… in CalcTrustRatio() argument 279 …float *reduce_workspace_addr = GetPossiblyNullDeviceAddress<float>(workspaces, kReduceWorkspaceInd… in CalcTrustRatio() [all …]
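
lamb_gpu_kernel.h resolves its workspace slots through named index constants (`kUpdateIndex`, `kVarFloatIndex`, `kGradFloatIndex`, `kGHatValIndex`, `kTrustRatioIndex`) rather than bare numbers, which keeps `Launch` readable and makes layout changes local to one enum. A small sketch of that convention; the enum values and buffer roles here are illustrative assumptions, not the kernel's real layout.

```cpp
// Sketch: named workspace-slot indices, as in lamb_gpu_kernel.h
// ("workspaces param index", GetDeviceAddress<float>(workspaces, kUpdateIndex)).
// The enum values and buffer roles below are illustrative assumptions.
#include <cstddef>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  void *ptr = nullptr;
};

template <typename T>
T *GetDeviceAddress(const std::vector<KernelTensor *> &tensors, std::size_t index) {
  return index < tensors.size() ? static_cast<T *>(tensors[index]->device_ptr()) : nullptr;
}

enum LambWorkspaceIndex : std::size_t {
  kUpdateIndex = 0,
  kVarFloatIndex,
  kGradFloatIndex,
  kGHatValIndex,
  kTrustRatioIndex,
};

struct LambScratch {
  float *update;
  float *var_float;
  float *grad_float;
  float *g_hat_var;
};

// Resolve every named slot in one place, mirroring the Launch() lines above.
inline LambScratch ResolveScratch(const std::vector<KernelTensor *> &workspaces) {
  return {GetDeviceAddress<float>(workspaces, kUpdateIndex),
          GetDeviceAddress<float>(workspaces, kVarFloatIndex),
          GetDeviceAddress<float>(workspaces, kGradFloatIndex),
          GetDeviceAddress<float>(workspaces, kGHatValIndex)};
}

}  // namespace sketch
```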
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/random/ |
| D | random_choice_with_mask_gpu_kernel.cc | 84 const std::vector<KernelTensor *> &workspaces, in LaunchKernel() argument 95 S *index_buff = GetDeviceAddress<S>(workspaces, 0); in LaunchKernel() 96 S *mask_buff = GetDeviceAddress<S>(workspaces, 1); in LaunchKernel() 97 S *rank_buff = GetDeviceAddress<S>(workspaces, 2); in LaunchKernel() 98 S *Tnum_buff = GetDeviceAddress<S>(workspaces, 3); in LaunchKernel() 99 S *tmp_buff = GetDeviceAddress<S>(workspaces, 4); in LaunchKernel() 100 void *States = GetDeviceAddress<void *>(workspaces, 5); in LaunchKernel()
|
| D | random_categorical_gpu_kernel.h | 40 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, in Launch() argument 55 host_cdf[i] = GetDeviceAddress<double>(workspaces, i); in Launch() 58 host_rand[i] = GetDeviceAddress<double>(workspaces, batch_size_ + 1 + i); in Launch() 61 double **dev_cdf = GetDeviceAddress<double *>(workspaces, batch_size_); in Launch() 62 double **dev_rand = GetDeviceAddress<double *>(workspaces, batch_size_ * 2 + 1); in Launch()
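
random_categorical_gpu_kernel.h shows a workspace layout addressed with runtime offsets: per-batch `double` buffers occupy the leading slots, and pointer tables (`dev_cdf`, `dev_rand`) sit at offsets computed from `batch_size_`. The sketch below gathers the cdf slots plus their pointer-table slot under that assumed layout; the exact slot order is inferred from the hit, not confirmed.

```cpp
// Sketch: workspace slots addressed with runtime offsets, as in
// random_categorical_gpu_kernel.h (host_cdf[i] at slot i, dev_cdf at slot
// batch_size_). The layout below is inferred from the hit and is an assumption.
#include <cstddef>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  void *ptr = nullptr;
};

template <typename T>
T *GetDeviceAddress(const std::vector<KernelTensor *> &tensors, std::size_t index) {
  return index < tensors.size() ? static_cast<T *>(tensors[index]->device_ptr()) : nullptr;
}

// Gather the per-batch cdf buffers (slots 0..batch_size-1) and the pointer-table
// slot that follows them (slot batch_size).
inline void CollectCdfSlots(const std::vector<KernelTensor *> &workspaces,
                            std::size_t batch_size, std::vector<double *> *host_cdf,
                            double ***dev_cdf) {
  host_cdf->clear();
  for (std::size_t i = 0; i < batch_size; ++i) {
    host_cdf->push_back(GetDeviceAddress<double>(workspaces, i));
  }
  *dev_cdf = GetDeviceAddress<double *>(workspaces, batch_size);
}

}  // namespace sketch
```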
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/kernel/graph_kernel/kernel_packet/ |
| D | kernel_packet_kernel_mod.cc | 126 MS_LOG(DEBUG) << "Inner kernel workspaces size: " << workspace.size(); in Resize() 151 const std::vector<KernelTensor *> &workspaces, in Launch() argument 154 auto [inner_inputs, inner_workspaces] = GetLaunchArgs(inputs, workspaces, stream_ptr); in Launch() 171 … const std::vector<KernelTensor *> &workspaces, in GetLaunchArgs() argument 184 // set the device_ptr of workspaces to res_input in GetLaunchArgs() 185 res_inputs[i]->set_pointer_ref_count(workspaces[j]->pointer_ref_count()); in GetLaunchArgs() 197 MS_LOG(DEBUG) << "Worspaces size: " << workspaces.size(); in GetLaunchArgs() 199 …std::vector<KernelTensor *> res_workspace(workspaces.begin() + input_workspace_map_.size(), worksp… in GetLaunchArgs()
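
kernel_packet_kernel_mod.cc splits the outer `workspaces` in `GetLaunchArgs`: the leading buffers back the inner kernel's extra inputs (their pointers are handed over, per the `set_pointer_ref_count` line), and the remainder, from `workspaces.begin() + input_workspace_map_.size()` onward, is forwarded as the inner kernel's own workspace list. A simplified sketch of that split; the one-to-one pairing below stands in for `input_workspace_map_` and `set_pointer_ref_count()` and is an assumption.

```cpp
// Sketch of the split done in kernel_packet_kernel_mod.cc's GetLaunchArgs():
// the first N workspace buffers back extra inner-kernel inputs, and the rest are
// forwarded as the inner kernel's own workspaces. The one-to-one pairing below
// stands in for input_workspace_map_ and set_pointer_ref_count() and is assumed.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  void set_device_ptr(void *p) { ptr = p; }  // stand-in for set_pointer_ref_count()
  void *ptr = nullptr;
};

inline std::pair<std::vector<KernelTensor *>, std::vector<KernelTensor *>> GetLaunchArgs(
    std::vector<KernelTensor *> extra_inner_inputs,
    const std::vector<KernelTensor *> &workspaces) {
  const std::size_t n = std::min(extra_inner_inputs.size(), workspaces.size());
  // Point each extra inner input at the matching leading workspace buffer.
  for (std::size_t i = 0; i < n; ++i) {
    extra_inner_inputs[i]->set_device_ptr(workspaces[i]->device_ptr());
  }
  // Everything past the mapped prefix stays a workspace for the inner kernel.
  std::vector<KernelTensor *> inner_workspaces(
      workspaces.begin() + static_cast<std::ptrdiff_t>(n), workspaces.end());
  return {std::move(extra_inner_inputs), std::move(inner_workspaces)};
}

}  // namespace sketch
```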
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/debug/ |
| D | assert_gpu_kernel.h | 36 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, in Launch() argument 39 void **inputs_device = GetDeviceAddress<void *>(workspaces, 0); in Launch() 40 int *summarizes_device = GetDeviceAddress<int>(workspaces, 1); in Launch() 41 int *types_device = GetDeviceAddress<int>(workspaces, 2); in Launch()
|
| /third_party/parse5/ |
| D | package.json | 5 "workspaces": [ array 29 "build": "npm run build:esm && npm run build:cjs --workspaces --if-present", 46 "publish": "npm publish --workspaces",
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/runtime/pynative/ |
| D | op_runner.cc | 381 std::vector<kernel::KernelTensor *> workspaces; in GetWorkspaceKernelTensors() local 391 (void)workspaces.emplace_back(device_address->kernel_tensor().get()); in GetWorkspaceKernelTensors() 392 MS_EXCEPTION_IF_NULL(workspaces.back()); in GetWorkspaceKernelTensors() 393 MS_LOG(DEBUG) << "workspace[" << i << "]:" << workspaces.back()->device_ptr() in GetWorkspaceKernelTensors() 394 << " size:" << workspaces.back()->size(); in GetWorkspaceKernelTensors() 396 return workspaces; in GetWorkspaceKernelTensors() 413 // Resize of workspaces, because of the dynamic size of workspace. in GetWorkspaceKernelTensors() 436 std::vector<kernel::KernelTensor *> workspaces = in GetWorkspaceKernelTensors() local 447 (void)workspaces.emplace_back(device_address->kernel_tensor().get()); in GetWorkspaceKernelTensors() 448 MS_LOG(DEBUG) << "workspace[" << i << "]:" << workspaces.back()->device_ptr() in GetWorkspaceKernelTensors() [all …]
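
op_runner.cc's `GetWorkspaceKernelTensors` builds the `workspaces` vector by walking the workspace device addresses and collecting `device_address->kernel_tensor().get()`, i.e. non-owning pointers whose lifetime stays with the device addresses. A compact sketch of that collection step, with `DeviceAddress` reduced to a stand-in for the runtime's type:

```cpp
// Sketch of GetWorkspaceKernelTensors() from op_runner.cc: collect non-owning
// KernelTensor* from each workspace device address. DeviceAddress below is a
// simplified stand-in for the runtime's type.
#include <cstddef>
#include <memory>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  std::size_t size() const { return bytes; }
  void *ptr = nullptr;
  std::size_t bytes = 0;
};

struct DeviceAddress {
  const std::shared_ptr<KernelTensor> &kernel_tensor() const { return tensor; }
  std::shared_ptr<KernelTensor> tensor = std::make_shared<KernelTensor>();
};

inline std::vector<KernelTensor *> GetWorkspaceKernelTensors(
    const std::vector<std::shared_ptr<DeviceAddress>> &workspace_addresses) {
  std::vector<KernelTensor *> workspaces;
  workspaces.reserve(workspace_addresses.size());
  for (const auto &addr : workspace_addresses) {
    // Ownership stays with the device address; the kernel only borrows the pointer.
    workspaces.emplace_back(addr->kernel_tensor().get());
  }
  return workspaces;
}

}  // namespace sketch
```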
|
| /third_party/grpc/tools/run_tests/helper_scripts/ |
| D | bundle_install_wrapper.sh | 31 # in different workspaces. That should work fine since the workspaces
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/arrays/ |
| D | topk_gpu_kernel.h | 39 …ol Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspaces, in Launch() argument 57 float *casted_float32_input = GetDeviceAddress<float>(workspaces, 0); in Launch() 58 float *casted_float32_top_k_output = GetDeviceAddress<float>(workspaces, 1); in Launch()
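
topk_gpu_kernel.h uses its workspace slots as dtype-staging buffers (`casted_float32_input`, `casted_float32_top_k_output`): inputs that are not float32 are cast into a float workspace before the TopK kernel runs, and the result is cast back afterwards. The host-side sketch below shows only the staging idea; the real code launches GPU cast kernels, so the loop here is a stand-in.

```cpp
// Sketch: workspace slots as dtype-staging buffers, as in topk_gpu_kernel.h
// (casted_float32_input / casted_float32_top_k_output). The host-side loop below
// stands in for the GPU cast kernels and is an assumption.
#include <cstddef>
#include <vector>

namespace sketch {

struct KernelTensor {
  void *device_ptr() const { return ptr; }
  void *ptr = nullptr;
};

template <typename T>
T *GetDeviceAddress(const std::vector<KernelTensor *> &tensors, std::size_t index) {
  return index < tensors.size() ? static_cast<T *>(tensors[index]->device_ptr()) : nullptr;
}

// Stage a non-float32 input into the float workspace at slot 0 so a
// float-only kernel can run on it; the result would be cast back the same way.
template <typename T>
void StageToFloat(const T *input, std::size_t count,
                  const std::vector<KernelTensor *> &workspaces) {
  float *staged = GetDeviceAddress<float>(workspaces, 0);
  if (staged == nullptr || input == nullptr) return;
  for (std::size_t i = 0; i < count; ++i) {
    staged[i] = static_cast<float>(input[i]);
  }
}

}  // namespace sketch
```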
|
| /third_party/rust/rust/src/tools/rust-analyzer/crates/rust-analyzer/src/handlers/ |
| D | notification.rs | 125 // Re-fetch workspaces if a workspace related file has changed in handle_did_save_text_document() 208 state.fetch_workspaces_queue.request_op("client workspaces changed".to_string(), false) in handle_did_change_workspace_folders() 234 // Trigger flychecks for all workspaces that depend on the saved file in run_flycheck() 260 // Find all workspaces that have at least one target containing the saved file in run_flycheck() 261 let workspace_ids = world.workspaces.iter().enumerate().filter(|(_, ws)| match ws { in run_flycheck()
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/kernel/pyboost/customize/ |
| D | identity.cc | 84 auto workspaces = PyBoostUtils::GetKernelTensorFromAddress(workspace_address); in IdentityCustomizeCallWithoutContigous() local 85 …if (!identity_kernel->Launch(input_kernel_tensors, workspaces, output_kernel_tensors, stream_ptr))… in IdentityCustomizeCallWithoutContigous() 132 auto workspaces = PyBoostUtils::GetKernelTensorFromAddress(workspace_address); in IdentityCustomizeCall() local 133 …if (!identity_kernel->Launch(input_kernel_tensors, workspaces, output_kernel_tensors, stream_ptr))… in IdentityCustomizeCall()
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/rl/ |
| D | buffer_sample_gpu_kernel.cc | 85 const std::vector<KernelTensor *> &workspaces, in Launch() argument 106 auto indexes = GetDeviceAddress<unsigned int>(workspaces, 0); in Launch() 108 auto key = GetDeviceAddress<unsigned int>(workspaces, 1); in Launch()
|
| /third_party/rust/rust/src/tools/rust-analyzer/crates/rust-analyzer/src/bin/ |
| D | main.rs | 190 .map(|workspaces| { in run_server() 191 workspaces in run_server() 198 .filter(|workspaces| !workspaces.is_empty()) in run_server()
|
| /third_party/rust/rust/src/tools/rust-analyzer/editors/code/src/ |
| D | ctx.ts | 212 const workspaces: JsonProject[] = await Promise.all( constant 222 this.addToDiscoveredWorkspaces(workspaces); 228 // we only want to set discovered workspaces on the right key 367 addToDiscoveredWorkspaces(workspaces: JsonProject[]) { 368 for (const workspace of workspaces) {
|
| /third_party/rust/rust/src/tools/rust-analyzer/crates/project-model/src/ |
| D | build_scripts.rs | 163 /// This populates the outputs for all passed in workspaces. 166 workspaces: &[&CargoWorkspace], in run_once() 185 // some workspaces might depend on the same crates, so we need to duplicate the outputs in run_once() 188 let mut res: Vec<_> = workspaces in run_once() 210 cb(&workspaces[workspace][package].name, &mut res[workspace].outputs[package]); in run_once() 223 for (idx, workspace) in workspaces.iter().enumerate() { in run_once()
|
| /third_party/rust/rust/src/tools/rustfmt/src/cargo-fmt/test/ |
| D | targets.rs | 75 mod workspaces { module 113 "workspaces/path-dep-above", in assert_correct_targets_loaded()
|