Extending TorchScript with Custom C++ Operators
Contact me
- Blog -> https://cugtyt.github.io/blog/index
- Email -> cugtyt@qq.com
- GitHub -> Cugtyt@GitHub
See here for this blog series' homepage and related posts.
This post is based on the tutorial on the official PyTorch website.
Implementing the Custom C++ Operator
Here we implement a perspective transform, exposing an OpenCV function as a TorchScript operator. Define an op.cpp:
#include <opencv2/opencv.hpp>
#include <torch/script.h>

torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
  // Wrap the tensors' storage in OpenCV matrices (no copy is made here).
  cv::Mat image_mat(/*rows=*/image.size(0),
                    /*cols=*/image.size(1),
                    /*type=*/CV_32FC1,
                    /*data=*/image.data<float>());
  cv::Mat warp_mat(/*rows=*/warp.size(0),
                   /*cols=*/warp.size(1),
                   /*type=*/CV_32FC1,
                   /*data=*/warp.data<float>());

  // Run the perspective transform in OpenCV.
  cv::Mat output_mat;
  cv::warpPerspective(image_mat, output_mat, warp_mat, /*dsize=*/{8, 8});

  // Wrap the OpenCV output in a tensor and clone it so the result owns its data.
  torch::Tensor output =
      torch::from_blob(output_mat.ptr<float>(), /*sizes=*/{8, 8});
  return output.clone();
}
Note that TorchScript only supports a subset of types as operator arguments: torch::Tensor, torch::Scalar, double, int64_t, and std::vector of these types. Other numeric types such as float, int, short, and long are not supported. cv::Mat can wrap the PyTorch tensor's data pointer directly, so no copy is needed on the way in. The second-to-last step wraps the OpenCV Mat back into a torch::Tensor. The last step makes a copy: torch::from_blob returns a tensor that does not own its data (the buffer still belongs to OpenCV), so we clone the output before returning it to get a tensor that owns its memory.
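To see why that final clone() matters, here is a rough Python analogue (a minimal sketch, not part of the original tutorial): torch.from_numpy behaves like torch::from_blob in that the resulting tensor only views a buffer it does not own.
import numpy as np
import torch

# from_numpy, like torch::from_blob, does not copy: the tensor is a view of
# the NumPy buffer and does not own the memory.
buf = np.zeros((2, 2), dtype=np.float32)
view = torch.from_numpy(buf)
owned = view.clone()  # clone() copies the data into memory owned by the tensor

buf[0, 0] = 1.0
print(view[0, 0].item())   # 1.0 -- the view tracks the original buffer
print(owned[0, 0].item())  # 0.0 -- the clone is unaffected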
Registering the Custom Operator with TorchScript
static auto registry =
torch::RegisterOperators("my_ops::warp_perspective", &warp_perspective);
If you need to register multiple operators, you can chain .op() calls:
static auto registry =
    torch::RegisterOperators("my_ops::warp_perspective", &warp_perspective)
        .op("my_ops::another_op", &another_op)
        .op("my_ops::and_another_op", &and_another_op);
Building the Custom Operator
Directory structure:
warp-perspective/
  op.cpp
  CMakeLists.txt
CMakeLists.txt:
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
project(warp_perspective)
find_package(Torch REQUIRED)
find_package(OpenCV REQUIRED)
# Define our library target
add_library(warp_perspective SHARED op.cpp)
# Enable C++11
target_compile_features(warp_perspective PRIVATE cxx_range_for)
# Link against LibTorch
target_link_libraries(warp_perspective "${TORCH_LIBRARIES}")
# Link against OpenCV
target_link_libraries(warp_perspective opencv_core opencv_imgproc)
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
make -j
You can load the library and check that the operator is available:
>>> import torch
>>> torch.ops.load_library("/path/to/libwarp_perspective.so")
>>> print(torch.ops.my_ops.warp_perspective)
<built-in method my_ops::warp_perspective of PyCapsule object at 0x7f618fc6fa50>
Using the Operator in Python
>>> import torch
>>> torch.ops.load_library("libwarp_perspective.so")
>>> torch.ops.my_ops.warp_perspective(torch.randn(32, 32), torch.rand(3, 3))
tensor([[0.0000, 0.3218, 0.4611, ..., 0.4636, 0.4636, 0.4636],
[0.3746, 0.0978, 0.5005, ..., 0.4636, 0.4636, 0.4636],
[0.3245, 0.0169, 0.0000, ..., 0.4458, 0.4458, 0.4458],
...,
[0.1862, 0.1862, 0.1692, ..., 0.0000, 0.0000, 0.0000],
[0.1862, 0.1862, 0.1692, ..., 0.0000, 0.0000, 0.0000],
[0.1862, 0.1862, 0.1692, ..., 0.0000, 0.0000, 0.0000]])
Tracing Code that Uses the Custom Operator
import torch

torch.ops.load_library("libwarp_perspective.so")

def compute(x, y, z):
    x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
    return x.matmul(y) + torch.relu(z)

inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5)]
trace = torch.jit.trace(compute, inputs)
print(trace.graph)
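The traced function can then be invoked like the original Python function; a minimal sketch reusing the shapes of the example inputs above:
output = trace(torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5))
print(output.shape)  # torch.Size([8, 5]); warp_perspective always emits an 8x8 image here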
Using the Operator in Script Mode
Besides tracing, another option is to write the code directly in TorchScript, by adding the @torch.jit.script decorator:
import torch

torch.ops.load_library("libwarp_perspective.so")

@torch.jit.script
def compute(x, y):
    if bool(x[0][0] == 42):
        z = 5
    else:
        z = 10
    x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
    return x.matmul(y) + z
Note that the TorchScript representation is still in flux and may change in future releases.
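Like the traced version, the scripted compute can be called directly and its graph inspected; a minimal sketch, with shapes chosen so the matmul lines up with the 8x8 output of warp_perspective:
out = compute(torch.randn(4, 8), torch.randn(8, 5))
print(out.shape)      # torch.Size([8, 5])
print(compute.graph)  # the graph contains a my_ops::warp_perspective node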
Using the TorchScript Custom Operator in C++
#include <torch/script.h> // One-stop header.

#include <iostream>
#include <memory>

int main(int argc, const char* argv[]) {
  if (argc != 2) {
    std::cerr << "usage: example-app <path-to-exported-script-module>\n";
    return -1;
  }

  // Deserialize the ScriptModule from a file using torch::jit::load().
  std::shared_ptr<torch::jit::script::Module> module = torch::jit::load(argv[1]);

  std::vector<torch::jit::IValue> inputs;
  inputs.push_back(torch::randn({4, 8}));
  inputs.push_back(torch::randn({8, 5}));

  torch::Tensor output = module->forward(std::move(inputs)).toTensor();
  std::cout << output << std::endl;
}
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
project(example_app)
find_package(Torch REQUIRED)
add_executable(example_app main.cpp)
target_link_libraries(example_app "${TORCH_LIBRARIES}")
target_compile_features(example_app PRIVATE cxx_range_for)
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
make -j
./example_app
Serialize a script function that uses the operator from Python:
import torch

torch.ops.load_library("libwarp_perspective.so")

@torch.jit.script
def compute(x, y):
    if bool(x[0][0] == 42):
        z = 5
    else:
        z = 10
    x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
    return x.matmul(y) + z

compute.save("example.pt")
./example_app example.pt
terminate called after throwing an instance of 'torch::jit::script::ErrorReport'
what():
Schema not found for node. File a bug report.
Node: %16 : Dynamic = my_ops::warp_perspective(%0, %19)
This fails because the example app has not yet been linked against the custom operator library. Change the directory structure to:
example_app/
  CMakeLists.txt
  main.cpp
  warp_perspective/
    CMakeLists.txt
    op.cpp
The top-level CMakeLists.txt:
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
project(example_app)
find_package(Torch REQUIRED)
add_subdirectory(warp_perspective)
add_executable(example_app main.cpp)
target_link_libraries(example_app "${TORCH_LIBRARIES}")
# --no-as-needed keeps the linker from dropping warp_perspective, whose symbols are
# never referenced directly; the app only needs its operator registration at load time.
target_link_libraries(example_app -Wl,--no-as-needed warp_perspective)
target_compile_features(example_app PRIVATE cxx_range_for)
The nested warp_perspective/CMakeLists.txt:
find_package(OpenCV REQUIRED)
add_library(warp_perspective SHARED op.cpp)
target_compile_features(warp_perspective PRIVATE cxx_range_for)
target_link_libraries(warp_perspective PRIVATE "${TORCH_LIBRARIES}")
target_link_libraries(warp_perspective PRIVATE opencv_core opencv_imgproc)
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
make -j
./example_app example.pt
11.4125 5.8262 9.5345 8.6111 12.3997
7.4683 13.5969 9.0850 11.0698 9.4008
7.4597 15.0926 12.5727 8.9319 9.0666
9.4834 11.1747 9.0162 10.9521 8.6269
10.0000 10.0000 10.0000 10.0000 10.0000
10.0000 10.0000 10.0000 10.0000 10.0000
10.0000 10.0000 10.0000 10.0000 10.0000
10.0000 10.0000 10.0000 10.0000 10.0000
[ Variable[CPUFloatType]{8,5} ]
Other Ways to Build the Custom Operator - JIT Compilation
JIT compilation can be used in two ways. The first is to keep the operator definition in a standalone file and compile the extension with torch.utils.cpp_extension.load():
import torch.utils.cpp_extension

torch.utils.cpp_extension.load(
    name="warp_perspective",
    sources=["op.cpp"],
    extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
    is_python_module=False,
    verbose=True
)

print(torch.ops.my_ops.warp_perspective)
The second is that JIT compilation also accepts the operator's source code as a string, via torch.utils.cpp_extension.load_inline:
import torch
import torch.utils.cpp_extension

op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>

torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
  cv::Mat image_mat(/*rows=*/image.size(0),
                    /*cols=*/image.size(1),
                    /*type=*/CV_32FC1,
                    /*data=*/image.data<float>());
  cv::Mat warp_mat(/*rows=*/warp.size(0),
                   /*cols=*/warp.size(1),
                   /*type=*/CV_32FC1,
                   /*data=*/warp.data<float>());

  cv::Mat output_mat;
  cv::warpPerspective(image_mat, output_mat, warp_mat, /*dsize=*/{64, 64});

  torch::Tensor output =
      torch::from_blob(output_mat.ptr<float>(), /*sizes=*/{64, 64});
  return output.clone();
}

static auto registry =
  torch::RegisterOperators("my_ops::warp_perspective", &warp_perspective);
"""

torch.utils.cpp_extension.load_inline(
    name="warp_perspective",
    cpp_sources=op_source,
    extra_ldflags=["-lopencv_core", "-lopencv_imgproc"],
    is_python_module=False,
    verbose=True,
)

print(torch.ops.my_ops.warp_perspective)
Other Ways to Build the Custom Operator - setuptools
All you need is a setup.py file:
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

setup(
    name="warp_perspective",
    ext_modules=[
        CppExtension(
            "warp_perspective",
            ["example_app/warp_perspective/op.cpp"],
            libraries=["opencv_core", "opencv_imgproc"],
        )
    ],
    cmdclass={"build_ext": BuildExtension.with_options(no_python_abi_suffix=True)},
)
Run it:
$ python setup.py build develop
running build
running build_ext
building 'warp_perspective' extension
creating build
creating build/temp.linux-x86_64-3.7
gcc -pthread -B /root/local/miniconda/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/root/local/miniconda/lib/python3.7/site-packages/torch/lib/include -I/root/local/miniconda/lib/python3.7/site-packages/torch/lib/include/torch/csrc/api/include -I/root/local/miniconda/lib/python3.7/site-packages/torch/lib/include/TH -I/root/local/miniconda/lib/python3.7/site-packages/torch/lib/include/THC -I/root/local/miniconda/include/python3.7m -c op.cpp -o build/temp.linux-x86_64-3.7/op.o -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=warp_perspective -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11
cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++
creating build/lib.linux-x86_64-3.7
g++ -pthread -shared -B /root/local/miniconda/compiler_compat -L/root/local/miniconda/lib -Wl,-rpath=/root/local/miniconda/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/op.o -lopencv_core -lopencv_imgproc -o build/lib.linux-x86_64-3.7/warp_perspective.so
running develop
running egg_info
creating warp_perspective.egg-info
writing warp_perspective.egg-info/PKG-INFO
writing dependency_links to warp_perspective.egg-info/dependency_links.txt
writing top-level names to warp_perspective.egg-info/top_level.txt
writing manifest file 'warp_perspective.egg-info/SOURCES.txt'
reading manifest file 'warp_perspective.egg-info/SOURCES.txt'
writing manifest file 'warp_perspective.egg-info/SOURCES.txt'
running build_ext
copying build/lib.linux-x86_64-3.7/warp_perspective.so ->
Creating /root/local/miniconda/lib/python3.7/site-packages/warp-perspective.egg-link (link to .)
Adding warp-perspective 0.0.0 to easy-install.pth file
Installed /warp_perspective
Processing dependencies for warp-perspective==0.0.0
Finished processing dependencies for warp-perspective==0.0.0
Then use it (the output below shows the operator registered under a custom namespace; with the op.cpp shown earlier it would appear as torch.ops.my_ops.warp_perspective):
>>> import torch
>>> torch.ops.load_library("warp_perspective.so")
>>> print(torch.ops.custom.warp_perspective)
<built-in method custom::warp_perspective of PyCapsule object at 0x7ff51c5b7bd0>