# Use ncnn to deploy a PyTorch model to an Android phone

Enable GPU support when compiling ncnn: the GPU backend is provided by Vulkan, so configure with `-DNCNN_VULKAN=ON`.

## MobileNetV3

Enable the CMake CMP0091 policy so the project links against the static MSVC runtime (/MT):

```cmake
cmake_minimum_required(VERSION 3.20.0)
cmake_policy(SET CMP0091 NEW)
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
project("client-project")
```

## Train YOLO

```powershell
\Envs\torch\Scripts\activate.ps1
python train.py --batch 6 --workers 2 --imgsz 960 --epochs 300 --data "\Core\yaml\data.yaml" --cfg "\Core\yaml\cfg.yaml" --weights \Core\weights\best.pt --device 0
```

## Convert the model

```python
import os

import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx

from libs import define
from libs.net import Net
from libs.dataset import ImageDataset

test_data = ImageDataset(define.testPath, False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = Net(out_dim=19).to(device)
model.load_state_dict(torch.load("./widget/last.pt"))
model.eval()


def saveOnnx():
    # Only one sample batch is needed to export the graph.
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        label = target.long()
        y = model(data)  # sanity-check forward pass

        # Export the model
        torch.onnx.export(model,                 # model being run
                          data,                  # model input (or a tuple for multiple inputs)
                          "./widget/best.onnx",  # where to save the model (file or file-like object)
                          export_params=True,    # store the trained parameter weights inside the model file
                          opset_version=10,      # the ONNX version to export the model to
                          do_constant_folding=True,  # whether to execute constant folding for optimization
                          input_names=['input'],     # the model's input names
                          output_names=['output'],   # the model's output names
                          dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                        'output': {0: 'batch_size'}})

        # TorchScript trace of the same sample (not used further here; call .save() if a .pt export is needed)
        traced_script_module = torch.jit.trace(model, data)
        return


saveOnnx()

# Conversion: simplify the ONNX graph, convert it to ncnn, then optimize
os.system("python -m onnxsim ./widget/best.onnx ./widget/best-sim.onnx")
os.system("./bin/onnx2ncnn.exe ./widget/best-sim.onnx ./widget/best.param ./widget/best.bin")
os.system("./bin/ncnnoptimize.exe ./widget/best.param ./widget/best.bin ./widget/best-opt.param ./widget/best-opt.bin 65536")
```

For the YOLO model trained above, the same export and conversion steps can be run from the command line:

```powershell
python .\export.py --weights weights/best.pt --img 960 --batch 1 --train
python -m onnxsim best.onnx best-sim.onnx
.\onnx2ncnn.exe best-sim.onnx best.param best.bin
ncnnoptimize best.param best.bin best-opt.param best-opt.bin 65536
```

## Git clone ncnn repo with submodule

```bash
$ git clone https://github.com/Tencent/ncnn.git
$ cd ncnn
$ git submodule update --init
```

- Build for Linux / NVIDIA Jetson / Raspberry Pi
- Build for Windows x64 using VS2017
- Build for macOS
- Build for ARM Cortex-A family with cross-compiling
- Build for Hisilicon platform with cross-compiling
- Build for Android
- Build for iOS on macOS with xcode
- Build for WebAssembly
- Build for AllWinner D1
- Build for Loongson 2K1000
- Build for Termux on Android

### Build for Linux

Install required build dependencies:
...
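Once ncnn has been built for the target device (for Android, via the "Build for Android" guide listed above), the `best-opt.param` / `best-opt.bin` pair can be loaded with ncnn's C++ API. The snippet below is only a minimal sketch, not code from this project: the blob names `input`/`output` and the 960×960 size follow the export settings above, while the file paths, the dummy image buffer, and the pixel normalization are placeholder assumptions that must be adapted to the real app (on Android the model files are typically bundled as assets).

```cpp
// Minimal ncnn inference sketch; assumes ncnn was built and linked (e.g. with -DNCNN_VULKAN=ON).
#include <cstdio>
#include <vector>

#include "net.h"  // ncnn

int main()
{
    ncnn::Net net;
    net.opt.use_vulkan_compute = true;  // use the Vulkan GPU backend when ncnn was built with it

    // Files produced by onnx2ncnn + ncnnoptimize (paths are placeholders)
    if (net.load_param("best-opt.param") != 0 || net.load_model("best-opt.bin") != 0)
    {
        fprintf(stderr, "failed to load model\n");
        return -1;
    }

    // Placeholder RGB image; in a real app this comes from the camera or a decoded bitmap
    const int img_w = 960, img_h = 960;
    std::vector<unsigned char> rgb(img_w * img_h * 3, 128);

    // Convert to ncnn::Mat, resizing to the network input size, then scale pixels to [0, 1]
    // (the normalization here is illustrative and must match the training preprocessing)
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(rgb.data(), ncnn::Mat::PIXEL_RGB,
                                                 img_w, img_h, 960, 960);
    const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
    in.substract_mean_normalize(0, norm_vals);

    ncnn::Extractor ex = net.create_extractor();
    ex.input("input", in);      // input blob name from the ONNX export
    ncnn::Mat out;
    ex.extract("output", out);  // output blob name from the ONNX export

    printf("output: w=%d h=%d c=%d\n", out.w, out.h, out.c);
    return 0;
}
```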