Skip to content

Commit

Permalink
Merge branch 'develop' into set_model_size
Browse files Browse the repository at this point in the history
  • Loading branch information
heliqi authored Mar 28, 2024
2 parents 0ae2909 + cc8d1f3 commit 15f0beb
Show file tree
Hide file tree
Showing 11 changed files with 14 additions and 12 deletions.
2 changes: 2 additions & 0 deletions c_api/fastdeploy_capi/core/fd_type.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ DECLARE_AND_IMPLEMENT_FD_TYPE_ONEDIMARRAY(OneDimArrayFloat)
DECLARE_AND_IMPLEMENT_FD_TYPE_ONEDIMARRAY(Cstr)
// FD_C_OneDimArrayCstr
DECLARE_AND_IMPLEMENT_FD_TYPE_TWODIMARRAY(OneDimArrayCstr, Cstr)
// FD_C_TwoDimArrayCstr
DECLARE_AND_IMPLEMENT_FD_TYPE_THREEDIMARRAY(TwoDimArrayCstr,OneDimArrayCstr)
// FD_C_TwoDimArraySize
DECLARE_AND_IMPLEMENT_FD_TYPE_TWODIMARRAY(TwoDimArraySize, OneDimArraySize)
// FD_C_TwoDimArrayInt8
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/function/gaussian_random.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ namespace function {
@param seed The seed of random generator.
@param dtype The data type of the output Tensor.
*/
void GaussianRandom(const std::vector<int64_t>& shape, FDTensor* out,
FASTDEPLOY_DECL void GaussianRandom(const std::vector<int64_t>& shape, FDTensor* out,
FDDataType dtype = FDDataType::FP32, float mean = 0.0f,
float std = 1.0f, int seed = 0);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ bool YOLOv5Postprocessor::Run(const std::vector<FDTensor>& tensors, std::vector<
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}

utils::NMS(&((*results)[bs]), nms_threshold_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ bool YOLOv5SegPostprocessor::Run(
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}
// get box index after nms
std::vector<int> index;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ bool YOLOv7Postprocessor::Run(const std::vector<FDTensor>& tensors, std::vector<
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}

utils::NMS(&((*results)[bs]), nms_threshold_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ bool YOLOv8Postprocessor::Run(
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}

utils::NMS(&((*results)[bs]), nms_threshold_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ bool CenterFacePostprocessor::Run(const std::vector<FDTensor>& infer_result,
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}

utils::NMS(&((*results)[bs]), nms_threshold_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ bool Yolov7FacePostprocessor::Run(const std::vector<FDTensor>& infer_result,
}

if ((*results)[bs].boxes.size() == 0) {
return true;
continue;
}

utils::NMS(&((*results)[bs]), nms_threshold_);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ void GetFinalPredictions(const std::vector<float>& heatmap,
coords[j * 2 + 1] = idx / heatmap_width;
int px = int(coords[j * 2] + 0.5);
int py = int(coords[j * 2 + 1] + 0.5);
if (DARK && px > 1 && px < heatmap_width - 2) {
if (DARK && px > 1 && px < heatmap_width - 2 && py > 1 && py < heatmap_height - 2) {
utils::DarkParse(heatmap, dim, &coords, px, py, index, j);
} else {
if (px > 0 && px < heatmap_width - 1) {
Expand Down
4 changes: 2 additions & 2 deletions serving/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,15 +22,15 @@ FastDeploy builds an end-to-end serving deployment based on [Triton Inference Se
CPU images only support Paddle/ONNX models for serving deployment on CPUs, and supported inference backends include OpenVINO, Paddle Inference, and ONNX Runtime

```shell
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.4-cpu-only-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.7-cpu-only-21.10
```

#### GPU Image

GPU images support Paddle/ONNX models for serving deployment on GPU and CPU, and supported inference backends include OpenVINO, TensorRT, Paddle Inference, and ONNX Runtime

```
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.4-gpu-cuda11.4-trt8.5-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.7-gpu-cuda11.4-trt8.5-21.10
```

Users can also compile the image by themselves according to their own needs, referring to the following documents:
Expand Down
4 changes: 2 additions & 2 deletions serving/README_CN.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,13 @@ FastDeploy基于[Triton Inference Server](https://github.com/triton-inference-se
#### CPU镜像
CPU镜像仅支持Paddle/ONNX模型在CPU上进行服务化部署,支持的推理后端包括OpenVINO、Paddle Inference和ONNX Runtime
``` shell
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.4-cpu-only-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.7-cpu-only-21.10
```

#### GPU镜像
GPU镜像支持Paddle/ONNX模型在GPU/CPU上进行服务化部署,支持的推理后端包括OpenVINO、TensorRT、Paddle Inference和ONNX Runtime
```
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.4-gpu-cuda11.4-trt8.5-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:1.0.7-gpu-cuda11.4-trt8.5-21.10
```

用户也可根据自身需求,参考如下文档自行编译镜像
Expand Down

0 comments on commit 15f0beb

Please sign in to comment.