//===- LowerGpuOpsToROCDLOps.cpp - MLIR GPU to ROCDL lowering passes ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to generate ROCDLIR operations for higher-level
// GPU operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToROCDL/GPUToROCDLPass.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Conversion/VectorToROCDL/VectorToROCDL.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
#include "../PassDetail.h"

using namespace mlir;

namespace {

/// Import the GPU Ops to ROCDL Patterns.
#include "GPUToROCDL.cpp.inc"

// A pass that replaces all occurrences of GPU device operations with their
// corresponding ROCDL equivalent.
//
// This pass only handles device code and is not meant to be run on GPU host
// code.
struct LowerGpuOpsToROCDLOpsPass
    : public ConvertGpuOpsToROCDLOpsBase<LowerGpuOpsToROCDLOpsPass> {
  LowerGpuOpsToROCDLOpsPass() = default;
  LowerGpuOpsToROCDLOpsPass(unsigned indexBitwidth) {
    this->indexBitwidth = indexBitwidth;
  }

  void runOnOperation() override {
    gpu::GPUModuleOp m = getOperation();

    /// Customize the bitwidth used for the device side index computations.
    LowerToLLVMOptions options = {/*useBarePtrCallConv =*/false,
                                  /*emitCWrappers =*/true,
                                  /*indexBitwidth =*/indexBitwidth,
                                  /*useAlignedAlloc =*/false};
    LLVMTypeConverter converter(m.getContext(), options);

    OwningRewritePatternList patterns, llvmPatterns;

    // Run GPU-dialect-level rewrites (e.g. expanding ops that have no direct
    // ROCDL counterpart) before the dialect conversion proper.
    populateGpuRewritePatterns(m.getContext(), patterns);
    applyPatternsAndFoldGreedily(m, std::move(patterns));

    populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
    populateVectorToROCDLConversionPatterns(converter, llvmPatterns);
    populateStdToLLVMConversionPatterns(converter, llvmPatterns);
    populateGpuToROCDLConversionPatterns(converter, llvmPatterns);
    LLVMConversionTarget target(getContext());
    target.addIllegalDialect<gpu::GPUDialect>();
    // Mark the LLVM math intrinsic ops illegal so that the OCML function-call
    // lowerings below win over the default std-to-LLVM intrinsic lowerings.
    target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::FAbsOp, LLVM::FCeilOp,
                        LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op,
                        LLVM::Log2Op>();
    target.addIllegalOp<FuncOp>();
    target.addLegalDialect<ROCDL::ROCDLDialect>();
    // TODO: Remove once we support replacing non-root ops.
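    // (The conversion framework can currently only replace the root op of a
    // matched pattern, so the enclosing gpu.module/gpu.module_end and nested
    // gpu.yield terminators must stay legal even though the GPU dialect as a
    // whole is illegal.)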
    target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
    if (failed(applyPartialConversion(m, target, std::move(llvmPatterns))))
      signalPassFailure();
  }
};

} // anonymous namespace

void mlir::populateGpuToROCDLConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
  populateWithGenerated(converter.getDialect()->getContext(), patterns);
  // Lower the GPU index and dimension query ops to the per-dimension ROCDL
  // workitem/workgroup intrinsics. The alloca address space of
  // GPUFuncOpLowering is 5, the AMDGPU private (scratch) address space.
  patterns.insert<
      GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
                                  ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>,
      GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp,
                                  ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>,
      GPUIndexIntrinsicOpLowering<gpu::BlockIdOp, ROCDL::BlockIdXOp,
                                  ROCDL::BlockIdYOp, ROCDL::BlockIdZOp>,
      GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp,
                                  ROCDL::GridDimYOp, ROCDL::GridDimZOp>,
      GPUFuncOpLowering<5>, GPUReturnOpLowering>(converter);
  // Lower standard math ops to calls into the ROCm device library (OCML),
  // dispatching to the f32 or f64 variant by operand type.
  patterns.insert<OpToFuncCallLowering<AbsFOp>>(converter, "__ocml_fabs_f32",
                                                "__ocml_fabs_f64");
  patterns.insert<OpToFuncCallLowering<CeilFOp>>(converter, "__ocml_ceil_f32",
                                                 "__ocml_ceil_f64");
  patterns.insert<OpToFuncCallLowering<CosOp>>(converter, "__ocml_cos_f32",
                                               "__ocml_cos_f64");
  patterns.insert<OpToFuncCallLowering<ExpOp>>(converter, "__ocml_exp_f32",
                                               "__ocml_exp_f64");
  patterns.insert<OpToFuncCallLowering<FloorFOp>>(converter, "__ocml_floor_f32",
                                                  "__ocml_floor_f64");
  patterns.insert<OpToFuncCallLowering<LogOp>>(converter, "__ocml_log_f32",
                                               "__ocml_log_f64");
  patterns.insert<OpToFuncCallLowering<Log10Op>>(converter, "__ocml_log10_f32",
                                                 "__ocml_log10_f64");
  patterns.insert<OpToFuncCallLowering<Log2Op>>(converter, "__ocml_log2_f32",
                                                "__ocml_log2_f64");
  patterns.insert<OpToFuncCallLowering<SinOp>>(converter, "__ocml_sin_f32",
                                               "__ocml_sin_f64");
  patterns.insert<OpToFuncCallLowering<TanhOp>>(converter, "__ocml_tanh_f32",
                                                "__ocml_tanh_f64");
}

std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToROCDLOpsPass(unsigned indexBitwidth) {
  return std::make_unique<LowerGpuOpsToROCDLOpsPass>(indexBitwidth);
}
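
// A minimal usage sketch (illustrative only; not part of the upstream file).
// It assumes an initialized MLIRContext and a top-level ModuleOp holding one
// or more gpu.module ops, and schedules this pass on each nested gpu.module
// via the standard PassManager API. The same lowering is exposed to mlir-opt
// as `-convert-gpu-to-rocdl`.
//
//   mlir::PassManager pm(&context);
//   pm.addNestedPass<mlir::gpu::GPUModuleOp>(
//       mlir::createLowerGpuOpsToROCDLOpsPass(/*indexBitwidth=*/32));
//   if (mlir::failed(pm.run(module)))
//     llvm::errs() << "GPU to ROCDL lowering failed\n";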