#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
#ifdef USE_CUDA
#include <torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h>
#endif
#include <torch/csrc/inductor/aoti_torch/tensor_converter.h>
#include <torch/csrc/inductor/aoti_torch/utils.h>
#include <torch/csrc/utils/pybind.h>

namespace torch::inductor {

void initAOTIRunnerBindings(PyObject* module) {
  auto rootModule = py::handle(module).cast<py::module>();
  auto m = rootModule.def_submodule("_aoti");

  py::class_<AOTIModelContainerRunnerCpu>(m, "AOTIModelContainerRunnerCpu")
      .def(py::init<const std::string&, int>())
      .def("run", &AOTIModelContainerRunnerCpu::run)
      .def("get_call_spec", &AOTIModelContainerRunnerCpu::get_call_spec)
      .def(
          "get_constant_names_to_original_fqns",
          &AOTIModelContainerRunnerCpu::getConstantNamesToOriginalFQNs)
      .def(
          "get_constant_names_to_dtypes",
          &AOTIModelContainerRunnerCpu::getConstantNamesToDtypes);
#ifdef USE_CUDA
  py::class_<AOTIModelContainerRunnerCuda>(m, "AOTIModelContainerRunnerCuda")
      .def(py::init<const std::string&, int>())
      .def(py::init<const std::string&, int, const std::string&>())
      .def(py::init<
           const std::string&,
           int,
           const std::string&,
           const std::string&>())
      .def("run", &AOTIModelContainerRunnerCuda::run)
      .def("get_call_spec", &AOTIModelContainerRunnerCuda::get_call_spec)
      .def(
          "get_constant_names_to_original_fqns",
          &AOTIModelContainerRunnerCuda::getConstantNamesToOriginalFQNs)
      .def(
          "get_constant_names_to_dtypes",
          &AOTIModelContainerRunnerCuda::getConstantNamesToDtypes);
#endif

  // Convert tensors to opaque AtenTensorHandle pointers, transferring
  // ownership of the underlying storage to the handles.
  m.def(
      "unsafe_alloc_void_ptrs_from_tensors",
      [](std::vector<at::Tensor>& tensors) {
        std::vector<AtenTensorHandle> handles =
            torch::aot_inductor::unsafe_alloc_new_handles_from_tensors(
                tensors);
        std::vector<void*> result(
            reinterpret_cast<void**>(handles.data()),
            reinterpret_cast<void**>(handles.data()) + handles.size());
        return result;
      });
  m.def("unsafe_alloc_void_ptr_from_tensor", [](at::Tensor& tensor) {
    return reinterpret_cast<void*>(
        torch::aot_inductor::new_tensor_handle(std::move(tensor)));
  });
  // Reclaim ownership: turn raw handles back into tensors without copying.
  m.def(
      "alloc_tensors_by_stealing_from_void_ptrs",
      [](std::vector<void*>& raw_handles) {
        return torch::aot_inductor::alloc_tensors_by_stealing_from_handles(
            reinterpret_cast<AtenTensorHandle*>(raw_handles.data()),
            raw_handles.size());
      });
  m.def("alloc_tensor_by_stealing_from_void_ptr", [](void* raw_handle) {
    return *torch::aot_inductor::tensor_handle_to_tensor_pointer(
        reinterpret_cast<AtenTensorHandle>(raw_handle));
  });
}
} // namespace torch::inductor
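
// Usage sketch (illustrative only, not part of the bindings): driving the
// AOTIModelContainerRunnerCpu bound above directly from C++. The "model.so"
// path is a placeholder, and the exact run() signature may vary across
// versions; this assumes it takes and returns std::vector<at::Tensor>.
//
//   std::vector<at::Tensor> run_compiled(std::vector<at::Tensor>& inputs) {
//     AOTIModelContainerRunnerCpu runner("model.so", /*num_models=*/1);
//     return runner.run(inputs);
//   }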
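//
// Round-trip sketch for the handle helpers wrapped above: ownership of each
// tensor moves into an opaque AtenTensorHandle and is later stolen back, so
// no tensor data is copied. Assumes the same converter signatures used in
// the lambdas above.
//
//   std::vector<at::Tensor> tensors = {torch::randn({2, 3})};
//   std::vector<AtenTensorHandle> handles =
//       torch::aot_inductor::unsafe_alloc_new_handles_from_tensors(tensors);
//   std::vector<at::Tensor> back =
//       torch::aot_inductor::alloc_tensors_by_stealing_from_handles(
//           handles.data(), handles.size());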