From 3d318341936a9b0ccd52a961dd29c953ffe45098 Mon Sep 17 00:00:00 2001 From: yeliang2258 <30516196+yeliang2258@users.noreply.github.com> Date: Wed, 8 Mar 2023 13:47:40 +0800 Subject: [PATCH] [Doc] Fix docs for KunlunXin, A311D and issue template (#1551) fix docs --- .../\346\212\245\345\221\212issue.md" | 3 +-- docs/cn/build_and_install/a311d.md | 8 +++---- docs/cn/build_and_install/kunlunxin.md | 21 ++++++------------- docs/cn/build_and_install/rv1126.md | 6 +++--- docs/en/build_and_install/a311d.md | 2 +- docs/en/build_and_install/kunlunxin.md | 11 +--------- 6 files changed, 16 insertions(+), 35 deletions(-) mode change 100644 => 100755 ".github/ISSUE_TEMPLATE/\346\212\245\345\221\212issue.md" diff --git "a/.github/ISSUE_TEMPLATE/\346\212\245\345\221\212issue.md" "b/.github/ISSUE_TEMPLATE/\346\212\245\345\221\212issue.md" old mode 100644 new mode 100755 index 691d197cb5..cafba1dbc4 --- "a/.github/ISSUE_TEMPLATE/\346\212\245\345\221\212issue.md" +++ "b/.github/ISSUE_TEMPLATE/\346\212\245\345\221\212issue.md" @@ -21,7 +21,7 @@ assignees: '' ## 问题日志及出现问题的操作流程 - 附上详细的问题日志有助于快速定位分析 -- 【模型跑不同】 +- 【模型跑不通】 - - 先执行`examples`下的部署示例,包括使用examples提供的模型,确认是否可以正确执行 - - 如若`examples`下的代码可以运行,但自己的模型,或自己的代码不能运行 - - - 提供复现问题的 代码+模型+错误log,供工程师快速定位问题 @@ -33,4 +33,3 @@ assignees: '' - - 注意性能测试,循环跑N次,取后80%的用时平均(模型启动时,刚开始受限于资源分配,速度会较慢) - - FastDeploy的Predict包含模型本身之外的数据前后处理用时 - - - 提供复现问题的 代码+模型+错误log,供工程师快速定位问题 - diff --git a/docs/cn/build_and_install/a311d.md b/docs/cn/build_and_install/a311d.md index 7066ca9d56..03b0a05ef0 100755 --- a/docs/cn/build_and_install/a311d.md +++ b/docs/cn/build_and_install/a311d.md @@ -20,9 +20,9 @@ FastDeploy 基于 Paddle Lite 后端支持在晶晨 NPU 上进行部署推理。 相关编译选项说明如下: |编译选项|默认值|说明|备注| |:---|:---|:---|:---| -|ENABLE_LITE_BACKEND|OFF|编译A311D部署库时需要设置为ON| - | -|WITH_TIMVX|OFF|编译A311D部署库时需要设置为ON| - | -|TARGET_ABI|NONE|编译RK库时需要设置为arm64| - | +|ENABLE_LITE_BACKEND|OFF|编译 A311D 部署库时需要设置为 ON | - | +|WITH_TIMVX|OFF|编译 A311D 部署库时需要设置为 ON | - | +|TARGET_ABI|NONE|编译 A311D 库时需要设置为 arm64 | - | 更多编译选项请参考[FastDeploy编译选项说明](./README.md) @@ -100,7 +100,7 @@ dmesg | grep Galcore wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz tar -xf PaddleLite-generic-demo.tar.gz ``` -2. 使用 `uname -a` 查看 `Linux Kernel` 版本,确定为 `Linux` 系统 4.19.111 版本, +2. 使用 `uname -a` 查看 `Linux Kernel` 版本,确定为 `Linux` 系统 4.19.113 版本, 3. 将 `PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113` 路径下的 `galcore.ko` 上传至开发板。 4. 
登录开发板,命令行输入 `sudo rmmod galcore` 来卸载原始驱动,输入 `sudo insmod galcore.ko` 来加载传上设备的驱动。(是否需要 sudo 根据开发板实际情况,部分 adb 链接的设备请提前 adb root)。此步骤如果操作失败,请跳转至方法 2。 diff --git a/docs/cn/build_and_install/kunlunxin.md b/docs/cn/build_and_install/kunlunxin.md index 375626dab0..bffcb5cfcc 100755 --- a/docs/cn/build_and_install/kunlunxin.md +++ b/docs/cn/build_and_install/kunlunxin.md @@ -10,19 +10,14 @@ FastDeploy 基于 Paddle Lite 后端支持在昆仑芯 XPU 上进行部署推理 相关编译选项说明如下: |编译选项|默认值|说明|备注| |:---|:---|:---|:---| -| WITH_KUNLUNXIN| OFF | 需要在昆仑芯XPU上部署时需要设置为ON | - | -| ENABLE_ORT_BACKEND | OFF | 是否编译集成ONNX Runtime后端 | - | -| ENABLE_PADDLE_BACKEND | OFF | 是否编译集成Paddle Inference后端 | - | -| ENABLE_OPENVINO_BACKEND | OFF | 是否编译集成OpenVINO后端 | - | +| WITH_KUNLUNXIN| OFF | 需要在昆仑芯 XPU 上部署时需要设置为 ON | - | | ENABLE_VISION | OFF | 是否编译集成视觉模型的部署模块 | - | -| ENABLE_TEXT | OFF | 是否编译集成文本NLP模型的部署模块 | - | +| ENABLE_TEXT | OFF | 是否编译集成文本 NLP 模型的部署模块 | - | 第三方库依赖指定(不设定如下参数,会自动下载预编译库) | 选项 | 说明 | | :---------------------- | :--------------------------------------------------------------------------------------------- | -| ORT_DIRECTORY | 当开启ONNX Runtime后端时,用于指定用户本地的ONNX Runtime库路径;如果不指定,编译过程会自动下载ONNX Runtime库 | -| OPENCV_DIRECTORY | 当ENABLE_VISION=ON时,用于指定用户本地的OpenCV库路径;如果不指定,编译过程会自动下载OpenCV库 | -| OPENVINO_DIRECTORY | 当开启OpenVINO后端时, 用于指定用户本地的OpenVINO库路径;如果不指定,编译过程会自动下载OpenVINO库 | +| OPENCV_DIRECTORY | 当 ENABLE_VISION=ON 时,用于指定用户本地的 OpenCV 库路径;如果不指定,编译过程会自动下载 OpenCV 库 | 更多编译选项请参考[FastDeploy编译选项说明](./README.md) @@ -31,7 +26,7 @@ FastDeploy 基于 Paddle Lite 后端支持在昆仑芯 XPU 上进行部署推理 - gcc/g++: version >= 8.2 - cmake: version >= 3.15 -此外更推荐开发者自行安装,编译时通过`-DOPENCV_DIRECTORY`来指定环境中的OpenCV(如若不指定-DOPENCV_DIRECTORY,会自动下载FastDeploy提供的预编译的OpenCV,但在**Linux平台**无法支持Video的读取,以及imshow等可视化界面功能) +此外更推荐开发者自行安装,编译时通过 `-DOPENCV_DIRECTORY` 来指定环境中的 OpenCV(如若不指定 -DOPENCV_DIRECTORY,会自动下载 FastDeploy 提供的预编译的 OpenCV,但在 **Linux平台** 无法支持 Video 的读取,以及 imshow 等可视化界面功能) ``` sudo apt-get install libopencv-dev @@ -46,11 +41,9 @@ mkdir build && cd build # CMake configuration with KunlunXin xpu toolchain cmake -DWITH_KUNLUNXIN=ON \ -DWITH_GPU=OFF \ # 不编译 GPU - -DENABLE_ORT_BACKEND=ON \ # 可选择开启 ORT 后端 - -DENABLE_PADDLE_BACKEND=ON \ # 可选择开启 Paddle 后端 -DCMAKE_INSTALL_PREFIX=fastdeploy-kunlunxin \ -DENABLE_VISION=ON \ # 是否编译集成视觉模型的部署模块,可选择开启 - -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \ # 指定系统自带的opencv路径 + -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \ # 指定系统自带的 opencv 路径 .. 
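# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original patch): once this configure step
# succeeds and `make install` has populated the -DCMAKE_INSTALL_PREFIX
# directory (fastdeploy-kunlunxin) with the C++ SDK, a downstream C++ example
# is typically configured against that directory via -DFASTDEPLOY_INSTALL_DIR,
# the variable consumed by FastDeploy's example CMakeLists. The example path
# below is a placeholder, not taken from this patch.
#
#   cd /path/to/your_example/cpp
#   mkdir build && cd build
#   cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/FastDeploy/build/fastdeploy-kunlunxin
#   make -j8
# ---------------------------------------------------------------------------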
# Build FastDeploy KunlunXin XPU C++ SDK @@ -66,10 +59,8 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git cd FastDeploy/python export WITH_KUNLUNXIN=ON export WITH_GPU=OFF -export ENABLE_ORT_BACKEND=ON -export ENABLE_PADDLE_BACKEND=ON export ENABLE_VISION=ON -# OPENCV_DIRECTORY可选,不指定会自动下载FastDeploy提供的预编译OpenCV库 +# OPENCV_DIRECTORY 可选,不指定会自动下载 FastDeploy 提供的预编译 OpenCV 库 export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 python setup.py build diff --git a/docs/cn/build_and_install/rv1126.md b/docs/cn/build_and_install/rv1126.md index 090393e1d8..669050c308 100755 --- a/docs/cn/build_and_install/rv1126.md +++ b/docs/cn/build_and_install/rv1126.md @@ -20,9 +20,9 @@ FastDeploy基于 Paddle Lite 后端支持在瑞芯微(Rockchip)Soc 上进行 相关编译选项说明如下: |编译选项|默认值|说明|备注| |:---|:---|:---|:---| -|ENABLE_LITE_BACKEND|OFF|编译RK库时需要设置为ON| - | -|WITH_TIMVX|OFF|编译RK库时需要设置为ON| - | -|TARGET_ABI|NONE|编译RK库时需要设置为armhf| - | +|ENABLE_LITE_BACKEND|OFF|编译 RK 库时需要设置为 ON | - | +|WITH_TIMVX|OFF|编译 RK 库时需要设置为 ON | - | +|TARGET_ABI|NONE|编译 RK 库时需要设置为 armhf | - | 更多编译选项请参考[FastDeploy编译选项说明](./README.md) diff --git a/docs/en/build_and_install/a311d.md b/docs/en/build_and_install/a311d.md index 6d67147470..f431f55c28 100755 --- a/docs/en/build_and_install/a311d.md +++ b/docs/en/build_and_install/a311d.md @@ -88,7 +88,7 @@ There are two ways to modify the current NPU driver version: wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz tar -xf PaddleLite-generic-demo.tar.gz ``` -2. Use `uname -a` to check `Linux Kernel` version, it is determined to be version 4.19.111. +2. Use `uname -a` to check `Linux Kernel` version, it is determined to be version 4.19.113. 3. Upload `galcore.ko` under `PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113` path to the development board. 4. Log in to the development board, enter `sudo rmmod galcore` on the command line to uninstall the original driver, and enter `sudo insmod galcore.ko` to load the uploaded device driver. (Whether sudo is needed depends on the actual situation of the development board. For some adb-linked devices, please adb root in advance). If this step fails, go to method 2. 5. 
Enter `dmesg | grep Galcore` in the development board to query the NPU driver version, and it is determined to be: 6.4.4.3 diff --git a/docs/en/build_and_install/kunlunxin.md b/docs/en/build_and_install/kunlunxin.md index 17f3251e8e..a384e0b8cb 100755 --- a/docs/en/build_and_install/kunlunxin.md +++ b/docs/en/build_and_install/kunlunxin.md @@ -9,20 +9,15 @@ This document describes how to compile the C++ FastDeploy library based on Paddl The relevant compilation options are described as follows: |Compile Options|Default Values|Description|Remarks| |:---|:---|:---|:---| -| ENABLE_LITE_BACKEND | OFF | It needs to be set to ON when compiling the RK library| - | +| ENABLE_LITE_BACKEND | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - | | WITH_KUNLUNXIN | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - | -| ENABLE_ORT_BACKEND | OFF | whether to intergrate ONNX Runtime backend | - | -| ENABLE_PADDLE_BACKEND | OFF | whether to intergrate Paddle Inference backend | - | -| ENABLE_OPENVINO_BACKEND | OFF | whether to intergrate OpenVINO backend | - | | ENABLE_VISION | OFF | whether to intergrate vision models | - | | ENABLE_TEXT | OFF | whether to intergrate text models | - | The configuration for third libraries(Optional, if the following option is not defined, the prebuilt third libraries will download automaticly while building FastDeploy). | Option | Description | | :---------------------- | :--------------------------------------------------------------------------------------------- | -| ORT_DIRECTORY | While ENABLE_ORT_BACKEND=ON, use ORT_DIRECTORY to specify your own ONNX Runtime library path. | | OPENCV_DIRECTORY | While ENABLE_VISION=ON, use OPENCV_DIRECTORY to specify your own OpenCV library path. | -| OPENVINO_DIRECTORY | While ENABLE_OPENVINO_BACKEND=ON, use OPENVINO_DIRECTORY to specify your own OpenVINO library path. | For more compilation options, please refer to [Description of FastDeploy compilation options](./README.md) @@ -46,8 +41,6 @@ mkdir build && cd build # CMake configuration with KunlunXin xpu toolchain cmake -DWITH_KUNLUNXIN=ON \ -DWITH_GPU=OFF \ - -DENABLE_ORT_BACKEND=ON \ - -DENABLE_PADDLE_BACKEND=ON \ -DCMAKE_INSTALL_PREFIX=fastdeploy-kunlunxin \ -DENABLE_VISION=ON \ -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \ @@ -66,8 +59,6 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git cd FastDeploy/python export WITH_KUNLUNXIN=ON export WITH_GPU=OFF -export ENABLE_ORT_BACKEND=ON -export ENABLE_PADDLE_BACKEND=ON export ENABLE_VISION=ON # The OPENCV_DIRECTORY is optional, if not exported, a prebuilt OpenCV library will be downloaded export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
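# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original patch): with the variables above
# exported, the Python wheel is assumed to be built, installed and
# smoke-tested roughly as follows; the wheel filename pattern is
# illustrative only.
python setup.py build
python setup.py bdist_wheel
python -m pip install dist/fastdeploy*.whl
python -c "import fastdeploy"   # verify the package imports
# ---------------------------------------------------------------------------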