diff --git a/src/layer/mips/requantize_leakyrelu_pack4.h b/src/layer/mips/requantize_leakyrelu_pack4.h
deleted file mode 100644
index 89bc14bd08b..00000000000
--- a/src/layer/mips/requantize_leakyrelu_pack4.h
+++ /dev/null
@@ -1,267 +0,0 @@
-// Tencent is pleased to support the open source community by making ncnn available.
-//
-// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
-//
-// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// https://opensource.org/licenses/BSD-3-Clause
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-static void requantize_leakyrelu_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
-{
-    int w = bottom_blob.w;
-    int h = bottom_blob.h;
-    int channels = bottom_blob.c;
-    int size = w * h;
-    int outc = top_blob.c;
-    int out_elempack = top_blob.elempack;
-
-    int scale_in_data_size = scale_in_data.w;
-    int scale_out_data_size = scale_out_data.w;
-    int bias_data_size = bias_data.w;
-
-    // int8(leakyrelu(v * scale_in, slope) * scale_out)
-    // int8_leakyrelu(v * (scale_in * scale_out), slope)
-
-    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
-    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
-
-    if (out_elempack == 8)
-    {
-        if (bias_data_size == 0)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < outc; q++)
-            {
-                const int* intptr0 = bottom_blob.channel(q * 2);
-                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                signed char* ptr = top_blob.channel(q);
-
-                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-                v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-                int i = 0;
-                for (; i + 3 < size; i += 4)
-                {
-                    __builtin_prefetch(intptr0 + 64);
-                    __builtin_prefetch(intptr1 + 64);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
-                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
-                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
-                    _v00 = __msa_fmul_w(_v00, _scale0);
-                    _v01 = __msa_fmul_w(_v01, _scale0);
-                    _v02 = __msa_fmul_w(_v02, _scale0);
-                    _v03 = __msa_fmul_w(_v03, _scale0);
-                    _v10 = __msa_fmul_w(_v10, _scale1);
-                    _v11 = __msa_fmul_w(_v11, _scale1);
-                    _v12 = __msa_fmul_w(_v12, _scale1);
-                    _v13 = __msa_fmul_w(_v13, _scale1);
-                    *((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
-                    *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
-                    *((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v02, _v12, _slope);
-                    *((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v03, _v13, _slope);
-
-                    intptr0 += 16;
-                    intptr1 += 16;
-                    ptr += 32;
-                }
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr0 + 16);
-                    __builtin_prefetch(intptr1 + 16);
-                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    _v0 = __msa_fmul_w(_v0, _scale0);
-                    _v1 = __msa_fmul_w(_v1, _scale1);
-                    *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-
-                    intptr0 += 4;
-                    intptr1 += 4;
-                    ptr += 8;
-                }
-            }
-        }
-        else
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < outc; q++)
-            {
-                const int* intptr0 = bottom_blob.channel(q * 2);
-                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                signed char* ptr = top_blob.channel(q);
-
-                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-                v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-                v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
-
-                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-                _bias0 = __msa_fmul_w(_bias0, _scale_out0);
-                _bias1 = __msa_fmul_w(_bias1, _scale_out1);
-                v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-                int i = 0;
-                for (; i + 3 < size; i += 4)
-                {
-                    __builtin_prefetch(intptr0 + 64);
-                    __builtin_prefetch(intptr1 + 64);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
-                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
-                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
-                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
-                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
-                    _v02 = __msa_fmadd_w(_bias0, _v02, _scale0);
-                    _v03 = __msa_fmadd_w(_bias0, _v03, _scale0);
-                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
-                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
-                    _v12 = __msa_fmadd_w(_bias1, _v12, _scale1);
-                    _v13 = __msa_fmadd_w(_bias1, _v13, _scale1);
-                    *((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
-                    *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
-                    *((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v02, _v12, _slope);
-                    *((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v03, _v13, _slope);
-
-                    intptr0 += 16;
-                    intptr1 += 16;
-                    ptr += 32;
-                }
-                for (; i + 1 < size; i += 2)
-                {
-                    __builtin_prefetch(intptr0 + 32);
-                    __builtin_prefetch(intptr1 + 32);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
-                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
-                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
-                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
-                    *((int64_t*)ptr) = float2int8leakyrelu(_v00, _v10, _slope);
-                    *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v01, _v11, _slope);
-
-                    intptr0 += 8;
-                    intptr1 += 8;
-                    ptr += 16;
-                }
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr0 + 16);
-                    __builtin_prefetch(intptr1 + 16);
-                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                    _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                    *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-
-                    intptr0 += 4;
-                    intptr1 += 4;
-                    ptr += 8;
-                }
-            }
-        }
-    }
-    if (out_elempack == 1)
-    {
-        if (bias_data_size == 0)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 4);
-                signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                signed char* ptr3 = top_blob.channel(q * 4 + 3);
-
-                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
-
-                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
-                v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-                int i = 0;
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr + 16);
-                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                    _v = __msa_fmul_w(_v, _scale);
-                    v16i8 v = float2int8leakyrelu(_v, _slope);
-                    ptr0[0] = v[0];
-                    ptr1[0] = v[1];
-                    ptr2[0] = v[2];
-                    ptr3[0] = v[3];
-
-                    intptr += 4;
-                    ptr0 += 1;
-                    ptr1 += 1;
-                    ptr2 += 1;
-                    ptr3 += 1;
-                }
-            }
-        }
-        else
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 4);
-                signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                signed char* ptr3 = top_blob.channel(q * 4 + 3);
-
-                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
-                v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 4, 0);
-
-                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
-                _bias = __msa_fmul_w(_bias, _scale_out);
-                v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-                int i = 0;
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr + 16);
-                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                    _v = __msa_fmadd_w(_bias, _v, _scale);
-                    v16i8 v = float2int8leakyrelu(_v, _slope);
-                    ptr0[0] = v[0];
-                    ptr1[0] = v[1];
-                    ptr2[0] = v[2];
-                    ptr3[0] = v[3];
-
-                    intptr += 4;
-                    ptr0 += 1;
-                    ptr1 += 1;
-                    ptr2 += 1;
-                    ptr3 += 1;
-                }
-            }
-        }
-    }
-}
diff --git a/src/layer/mips/requantize_leakyrelu_pack8.h b/src/layer/mips/requantize_leakyrelu_pack8.h
deleted file mode 100644
index f7968c9df70..00000000000
--- a/src/layer/mips/requantize_leakyrelu_pack8.h
+++ /dev/null
@@ -1,188 +0,0 @@
-// Tencent is pleased to support the open source community by making ncnn available.
-//
-// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
-//
-// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// https://opensource.org/licenses/BSD-3-Clause
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-static void requantize_leakyrelu_pack8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
-{
-    int w = bottom_blob.w;
-    int h = bottom_blob.h;
-    int channels = bottom_blob.c;
-    int size = w * h;
-
-    int scale_in_data_size = scale_in_data.w;
-    int scale_out_data_size = scale_out_data.w;
-    int bias_data_size = bias_data.w;
-
-    // int8(leakyrelu(v * scale_in, slope) * scale_out)
-    // int8_leakyrelu(v * (scale_in * scale_out), slope)
-
-    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
-    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
-
-    if (bias_data_size == 0)
-    {
-        #pragma omp parallel for num_threads(opt.num_threads)
-        for (int q = 0; q < channels; q++)
-        {
-            const int* intptr = bottom_blob.channel(q);
-            signed char* ptr = top_blob.channel(q);
-
-            v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-            v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-            v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-            v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-            v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-            v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-            v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-            int i = 0;
-            for (; i + 3 < size; i += 4)
-            {
-                __builtin_prefetch(intptr + 128);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0));
-                v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0));
-                v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0));
-                v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                _v2 = __msa_fmul_w(_v2, _scale0);
-                _v3 = __msa_fmul_w(_v3, _scale1);
-                _v4 = __msa_fmul_w(_v4, _scale0);
-                _v5 = __msa_fmul_w(_v5, _scale1);
-                _v6 = __msa_fmul_w(_v6, _scale0);
-                _v7 = __msa_fmul_w(_v7, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-                *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v2, _v3, _slope);
-                *((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v4, _v5, _slope);
-                *((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v6, _v7, _slope);
-
-                intptr += 32;
-                ptr += 32;
-            }
-            for (; i + 1 < size; i += 2)
-            {
-                __builtin_prefetch(intptr + 64);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                _v2 = __msa_fmul_w(_v2, _scale0);
-                _v3 = __msa_fmul_w(_v3, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-                *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v2, _v3, _slope);
-
-                intptr += 16;
-                ptr += 16;
-            }
-            for (; i < size; i++)
-            {
-                __builtin_prefetch(intptr + 32);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-
-                intptr += 8;
-                ptr += 8;
-            }
-        }
-    }
-    else
-    {
-        #pragma omp parallel for num_threads(opt.num_threads)
-        for (int q = 0; q < channels; q++)
-        {
-            const int* intptr = bottom_blob.channel(q);
-            signed char* ptr = top_blob.channel(q);
-
-            v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-            v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-            v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-            v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-            v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-            v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
-
-            v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-            v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-            _bias0 = __msa_fmul_w(_bias0, _scale_out0);
-            _bias1 = __msa_fmul_w(_bias1, _scale_out1);
-            v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
-
-            int i = 0;
-            for (; i + 3 < size; i += 4)
-            {
-                __builtin_prefetch(intptr + 128);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0));
-                v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0));
-                v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0));
-                v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                _v2 = __msa_fmadd_w(_bias0, _v2, _scale0);
-                _v3 = __msa_fmadd_w(_bias1, _v3, _scale1);
-                _v4 = __msa_fmadd_w(_bias0, _v4, _scale0);
-                _v5 = __msa_fmadd_w(_bias1, _v5, _scale1);
-                _v6 = __msa_fmadd_w(_bias0, _v6, _scale0);
-                _v7 = __msa_fmadd_w(_bias1, _v7, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-                *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v2, _v3, _slope);
-                *((int64_t*)(ptr + 16)) = float2int8leakyrelu(_v4, _v5, _slope);
-                *((int64_t*)(ptr + 24)) = float2int8leakyrelu(_v6, _v7, _slope);
-
-                intptr += 32;
-                ptr += 32;
-            }
-            for (; i + 1 < size; i += 2)
-            {
-                __builtin_prefetch(intptr + 64);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                _v2 = __msa_fmadd_w(_bias0, _v2, _scale0);
-                _v3 = __msa_fmadd_w(_bias1, _v3, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-                *((int64_t*)(ptr + 8)) = float2int8leakyrelu(_v2, _v3, _slope);
-
-                intptr += 16;
-                ptr += 16;
-            }
-            for (; i < size; i++)
-            {
-                __builtin_prefetch(intptr + 32);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
-
-                intptr += 8;
-                ptr += 8;
-            }
-        }
-    }
-}
diff --git a/src/layer/mips/requantize_mips.cpp b/src/layer/mips/requantize_mips.cpp
index 44e55f89477..6d3bbad70e0 100644
--- a/src/layer/mips/requantize_mips.cpp
+++ b/src/layer/mips/requantize_mips.cpp
@@ -23,13 +23,6 @@
 
 namespace ncnn {
 
-#if __mips_msa
-#include "requantize_leakyrelu_pack4.h"
-#include "requantize_leakyrelu_pack8.h"
-#include "requantize_relu_pack4.h"
-#include "requantize_relu_pack8.h"
-#endif // __mips_msa
-
 Requantize_mips::Requantize_mips()
 {
 #if __mips_msa
@@ -37,1344 +30,567 @@ Requantize_mips::Requantize_mips()
 #endif
 }
 
-int Requantize_mips::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
+static void requantize_relu(const int* intptr, signed char* ptr, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, int elemcount, int elempack)
 {
-    int dims = bottom_blob.dims;
-    int elempack = bottom_blob.elempack;
-
-#if __mips_msa
-    if (elempack == 8)
-    {
-        if (dims == 1)
-        {
-            int w = bottom_blob.w;
-
-            top_blob.create(w, (size_t)8u, 8, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
-
-            if (scale_in_data_size == 1 && scale_out_data_size == 1)
-            {
-                v4f32 _scale_in = (v4f32)__msa_fill_w_f32(scale_in_data[0]);
-                v4f32 _scale_out = (v4f32)__msa_fill_w_f32(scale_out_data[0]);
-
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in);
-                        _v1 = __msa_fmul_w(_v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias, _v0, _scale_in);
-                        _v1 = __msa_fmadd_w(_bias, _v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-            }
-            else if (scale_in_data_size == 1 && scale_out_data_size > 1)
-            {
-                v4f32 _scale_in = (v4f32)__msa_fill_w_f32(scale_in_data[0]);
+    const int scale_in_data_size = scale_in_data.w;
+    const int bias_data_size = bias_data.w;
+    const int scale_out_data_size = scale_out_data.w;
+    const int size = elemcount * elempack;
 
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
+    // NCNN_LOGE("requantize_relu %d %d %d   %d %d", scale_in_data_size, bias_data_size, scale_out_data_size, elemcount, elempack);
 
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in);
-                        _v1 = __msa_fmul_w(_v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
+    // int8(relu(v * scale_in) * scale_out)
+    // int8_relu(v * (scale_in * scale_out))
 
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
+    // int8(relu(v * scale_in + bias) * scale_out)
+    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))
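+    //
+    // illustrative note, not part of the kernel: the rewritten forms above are
+    // algebraically identical to the naive ones, e.g. with v = 3, scale_in = 0.5f,
+    // bias = 2.f, scale_out = 4.f:
+    //   int8(relu(3 * 0.5f + 2.f) * 4.f)          = int8(14.f)      = 14
+    //   int8_relu(3 * (0.5f * 4.f) + (2.f * 4.f)) = int8_relu(14.f) = 14
+    // so each vector below needs only one multiply (or fmadd when bias is present)
+    // followed by a fused relu + int8 conversion.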
 
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias, _v0, _scale_in);
-                        _v1 = __msa_fmadd_w(_bias, _v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-            }
-            else if (scale_in_data_size > 1 && scale_out_data_size == 1)
-            {
-                v4f32 _scale_out = (v4f32)__msa_fill_w_f32(scale_out_data[0]);
-
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in0);
-                        _v1 = __msa_fmul_w(_v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out);
-                        _v1 = __msa_fmul_w(_v1, _scale_out);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-            }
-            else // if (scale_in_data_size > 1 && scale_out_data_size > 1)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in0);
-                        _v1 = __msa_fmul_w(_v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 8;
-                        signed char* ptr = (signed char*)top_blob + i * 8;
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-                    }
-                }
-            }
+    float scale_in = scale_in_data[0];
+#if __mips_msa
+    v4f32 _scale_in0 = (v4f32)__msa_fill_w_f32(scale_in);
+    v4f32 _scale_in1 = _scale_in0;
+    if (scale_in_data_size > 1)
+    {
+        if (elempack == 8)
+        {
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = (v4f32)__msa_ld_w((const float*)scale_in_data + 4, 0);
         }
-
-        if (dims == 2)
+        if (elempack == 4)
         {
-            int w = bottom_blob.w;
-            int h = bottom_blob.h;
-
-            top_blob.create(w, h, (size_t)8u, 8, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
-
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < h; i++)
-                {
-                    const int* intptr = bottom_blob.row<const int>(i);
-                    signed char* ptr = top_blob.row<signed char>(i);
-
-                    v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                    v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                    v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                    v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-
-                    for (int j = 0; j < w; j++)
-                    {
-                        __builtin_prefetch(intptr + 32);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in0);
-                        _v1 = __msa_fmul_w(_v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                        intptr += 8;
-                        ptr += 8;
-                    }
-                }
-            }
-            else
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < h; i++)
-                {
-                    const int* intptr = bottom_blob.row<const int>(i);
-                    signed char* ptr = top_blob.row<signed char>(i);
-
-                    v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                    v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                    v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                    v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                    v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                    v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-
-                    for (int j = 0; j < w; j++)
-                    {
-                        __builtin_prefetch(intptr + 32);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                        intptr += 8;
-                        ptr += 8;
-                    }
-                }
-            }
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = _scale_in0;
         }
+    }
+#endif // __mips_msa
 
-        if (dims == 3)
+    float scale_out = scale_out_data[0];
+#if __mips_msa
+    v4f32 _scale_out0 = (v4f32)__msa_fill_w_f32(scale_out);
+    v4f32 _scale_out1 = _scale_out0;
+    if (scale_out_data_size > 1)
+    {
+        if (elempack == 8)
         {
-            int w = bottom_blob.w;
-            int h = bottom_blob.h;
-            int channels = bottom_blob.c;
-            int size = w * h;
-
-            top_blob.create(w, h, channels, (size_t)8u, 8, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
-
-            if (activation_type == 1)
-            {
-                requantize_relu_pack8_msa(bottom_blob, top_blob, scale_in_data, scale_out_data, bias_data, opt);
-                return 0;
-            }
-
-            if (activation_type == 2 && activation_params[0] > 0.f)
-            {
-                requantize_leakyrelu_pack8_msa(bottom_blob, top_blob, scale_in_data, scale_out_data, bias_data, activation_params[0], opt);
-                return 0;
-            }
-
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int q = 0; q < channels; q++)
-                {
-                    const int* intptr = bottom_blob.channel(q);
-                    signed char* ptr = top_blob.channel(q);
-
-                    v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                    v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                    v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                    v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-                    for (int i = 0; i < size; i++)
-                    {
-                        __builtin_prefetch(intptr + 32);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmul_w(_v0, _scale_in0);
-                        _v1 = __msa_fmul_w(_v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                        intptr += 8;
-                        ptr += 8;
-                    }
-                }
-            }
-            else
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int q = 0; q < channels; q++)
-                {
-                    const int* intptr = bottom_blob.channel(q);
-                    signed char* ptr = top_blob.channel(q);
-
-                    v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                    v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                    v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                    v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-                    v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-                    v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = (v4f32)__msa_ld_w((const float*)scale_out_data + 4, 0);
+        }
+        if (elempack == 4)
+        {
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = _scale_out0;
+        }
+    }
+#endif // __mips_msa
 
-                    for (int i = 0; i < size; i++)
-                    {
-                        __builtin_prefetch(intptr + 32);
-                        v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                        _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                        _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                        _v0 = activation_ps(_v0, activation_type, activation_params);
-                        _v1 = activation_ps(_v1, activation_type, activation_params);
-                        _v0 = __msa_fmul_w(_v0, _scale_out0);
-                        _v1 = __msa_fmul_w(_v1, _scale_out1);
-                        *((int64_t*)ptr) = float2int8(_v0, _v1);
+    float scale = scale_in * scale_out;
+#if __mips_msa
+    v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
+    v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
+#endif // __mips_msa
 
-                        intptr += 8;
-                        ptr += 8;
-                    }
-                }
-            }
+    if (bias_data_size == 0)
+    {
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmul_w(_v0, _scale0);
+            _v1 = __msa_fmul_w(_v1, _scale1);
+            *((int64_t*)ptr) = float2int8relu(_v0, _v1);
+            intptr += 8;
+            ptr += 8;
+        }
+        for (; i + 3 < size; i += 4)
+        {
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmul_w(_v, _scale0);
+            v16i8 v = float2int8relu(_v, _v);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
+        }
+#endif // __mips_msa
+        for (; i < size; i++)
+        {
+            float v = *intptr * scale;
+            *ptr = float2int8(v);
+            if (*ptr < 0) *ptr = 0;
+            intptr++;
+            ptr++;
         }
-
-        return 0;
     }
-
-    if (elempack == 4)
+    else
     {
-        if (dims == 1)
+        float bias = bias_data[0];
+#if __mips_msa
+        v4f32 _bias0 = (v4f32)__msa_fill_w_f32(bias);
+        v4f32 _bias1 = _bias0;
+        if (bias_data_size > 1)
         {
-            int w = bottom_blob.w;
-            int out_elempack = opt.use_packing_layout && w * elempack % 8 == 0 ? 8 : 1;
-            int outw = w * elempack / out_elempack;
-
-            top_blob.create(outw, (size_t)out_elempack, out_elempack, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
-
-            if (scale_in_data_size == 1 && scale_out_data_size == 1)
-            {
-                v4f32 _scale_in = (v4f32)__msa_fill_w_f32(scale_in_data[0]);
-                v4f32 _scale_out = (v4f32)__msa_fill_w_f32(scale_out_data[0]);
-
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmul_w(_v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _bias = (v4f32)__msa_ld_w((const float*)bias_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-            }
-            else if (scale_in_data_size == 1 && scale_out_data_size > 1)
+            if (elempack == 8)
             {
-                v4f32 _scale_in = (v4f32)__msa_fill_w_f32(scale_in_data[0]);
-
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmul_w(_v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _bias = (v4f32)__msa_ld_w((const float*)bias_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = (v4f32)__msa_ld_w((const float*)bias_data + 4, 0);
             }
-            else if (scale_in_data_size > 1 && scale_out_data_size == 1)
+            if (elempack == 4)
             {
-                v4f32 _scale_out = (v4f32)__msa_fill_w_f32(scale_out_data[0]);
-
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmul_w(_v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _bias = (v4f32)__msa_ld_w((const float*)bias_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = _bias0;
             }
-            else // if (scale_in_data_size > 1 && scale_out_data_size > 1)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
-
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmul_w(_v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else if (bias_data_size == 1)
-                {
-                    v4f32 _bias = (v4f32)__msa_fill_w_f32(bias_data[0]);
-
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
+        }
+#endif // __mips_msa
 
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < w; i++)
-                    {
-                        const int* intptr = (const int*)bottom_blob + i * 4;
-                        signed char* ptr = (signed char*)top_blob + i * 4;
+        bias = bias * scale_out;
+#if __mips_msa
+        _bias0 = __msa_fmul_w(_bias0, _scale_out0);
+        _bias1 = __msa_fmul_w(_bias1, _scale_out1);
+#endif // __mips_msa
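+        // note: the same argument extends to the biased case:
+        // relu(v * scale_in + bias) * scale_out == relu(v * (scale_in * scale_out) + bias * scale_out)
+        // for scale_out > 0, which is why the bias is pre-multiplied by scale_out here.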
 
-                        v4f32 _scale_in = (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _scale_out = (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _bias = (v4f32)__msa_ld_w((const float*)bias_data + i * 4, 0);
-                        v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                        _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                        _v = activation_ps(_v, activation_type, activation_params);
-                        _v = __msa_fmul_w(_v, _scale_out);
-                        v16i8 v = float2int8(_v);
-                        ptr[0] = v[0];
-                        ptr[1] = v[1];
-                        ptr[2] = v[2];
-                        ptr[3] = v[3];
-                    }
-                }
-            }
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
+            _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
+            *((int64_t*)ptr) = float2int8relu(_v0, _v1);
+            intptr += 8;
+            ptr += 8;
         }
-
-        if (dims == 2)
+        for (; i + 3 < size; i += 4)
         {
-            int w = bottom_blob.w;
-            int h = bottom_blob.h;
-            int out_elempack = opt.use_packing_layout && h * elempack % 8 == 0 ? 8 : 1;
-            int outh = h * elempack / out_elempack;
-
-            top_blob.create(w, outh, (size_t)out_elempack, out_elempack, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
-
-            if (out_elempack == 8)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < outh; i++)
-                    {
-                        const int* intptr0 = bottom_blob.row<const int>(i * 2);
-                        const int* intptr1 = bottom_blob.row<const int>(i * 2 + 1);
-                        signed char* ptr = top_blob.row<signed char>(i);
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-
-                        for (int j = 0; j < w; j++)
-                        {
-                            __builtin_prefetch(intptr0 + 16);
-                            __builtin_prefetch(intptr1 + 16);
-                            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                            _v0 = __msa_fmul_w(_v0, _scale_in0);
-                            _v1 = __msa_fmul_w(_v1, _scale_in1);
-                            _v0 = activation_ps(_v0, activation_type, activation_params);
-                            _v1 = activation_ps(_v1, activation_type, activation_params);
-                            _v0 = __msa_fmul_w(_v0, _scale_out0);
-                            _v1 = __msa_fmul_w(_v1, _scale_out1);
-                            *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                            intptr0 += 4;
-                            intptr1 += 4;
-                            ptr += 8;
-                        }
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < outh; i++)
-                    {
-                        const int* intptr0 = bottom_blob.row<const int>(i * 2);
-                        const int* intptr1 = bottom_blob.row<const int>(i * 2 + 1);
-                        signed char* ptr = top_blob.row<signed char>(i);
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 8 + 4, 0);
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 8 + 4, 0);
-
-                        for (int j = 0; j < w; j++)
-                        {
-                            __builtin_prefetch(intptr0 + 16);
-                            __builtin_prefetch(intptr1 + 16);
-                            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                            _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                            _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                            _v0 = activation_ps(_v0, activation_type, activation_params);
-                            _v1 = activation_ps(_v1, activation_type, activation_params);
-                            _v0 = __msa_fmul_w(_v0, _scale_out0);
-                            _v1 = __msa_fmul_w(_v1, _scale_out1);
-                            *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                            intptr0 += 4;
-                            intptr1 += 4;
-                            ptr += 8;
-                        }
-                    }
-                }
-            }
-            if (out_elempack == 1)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < h; i++)
-                    {
-                        const int* intptr = bottom_blob.row<const int>(i);
-                        signed char* ptr0 = top_blob.row<signed char>(i * 4);
-                        signed char* ptr1 = top_blob.row<signed char>(i * 4 + 1);
-                        signed char* ptr2 = top_blob.row<signed char>(i * 4 + 2);
-                        signed char* ptr3 = top_blob.row<signed char>(i * 4 + 3);
-
-                        v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmadd_w(_bias0, _v, _scale0);
+            v16i8 v = float2int8relu(_v, _v);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
+        }
+#endif // __mips_msa
+        for (; i < size; i++)
+        {
+            float v = *intptr * scale + bias;
+            *ptr = float2int8(v);
+            if (*ptr < 0) *ptr = 0;
+            intptr++;
+            ptr++;
+        }
+    }
+}
 
-                        for (int j = 0; j < w; j++)
-                        {
-                            __builtin_prefetch(intptr + 16);
-                            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                            _v = __msa_fmul_w(_v, _scale_in);
-                            _v = activation_ps(_v, activation_type, activation_params);
-                            _v = __msa_fmul_w(_v, _scale_out);
-                            v16i8 v = float2int8(_v);
-                            ptr0[0] = v[0];
-                            ptr1[0] = v[1];
-                            ptr2[0] = v[2];
-                            ptr3[0] = v[3];
+static void requantize_leakyrelu(const int* intptr, signed char* ptr, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, float slope, int elemcount, int elempack)
+{
+    const int scale_in_data_size = scale_in_data.w;
+    const int bias_data_size = bias_data.w;
+    const int scale_out_data_size = scale_out_data.w;
+    const int size = elemcount * elempack;
 
-                            intptr += 4;
-                            ptr0 += 1;
-                            ptr1 += 1;
-                            ptr2 += 1;
-                            ptr3 += 1;
-                        }
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int i = 0; i < h; i++)
-                    {
-                        const int* intptr = bottom_blob.row<const int>(i);
-                        signed char* ptr0 = top_blob.row<signed char>(i * 4);
-                        signed char* ptr1 = top_blob.row<signed char>(i * 4 + 1);
-                        signed char* ptr2 = top_blob.row<signed char>(i * 4 + 2);
-                        signed char* ptr3 = top_blob.row<signed char>(i * 4 + 3);
+    // NCNN_LOGE("requantize_leakyrelu %d %d %d   %d %d", scale_in_data_size, bias_data_size, scale_out_data_size, elemcount, elempack);
 
-                        v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + i * 4, 0);
-                        v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + i * 4, 0);
-                        v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + i * 4, 0);
+    // int8(leakyrelu(v * scale_in, slope) * scale_out)
+    // int8_leakyrelu(v * (scale_in * scale_out), slope)
 
-                        for (int j = 0; j < w; j++)
-                        {
-                            __builtin_prefetch(intptr + 16);
-                            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                            _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                            _v = activation_ps(_v, activation_type, activation_params);
-                            _v = __msa_fmul_w(_v, _scale_out);
-                            v16i8 v = float2int8(_v);
-                            ptr0[0] = v[0];
-                            ptr1[0] = v[1];
-                            ptr2[0] = v[2];
-                            ptr3[0] = v[3];
+    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
+    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
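+    // a brief justification of the rewrite above: for a positive scale_out,
+    // leakyrelu(x, slope) * scale_out == leakyrelu(x * scale_out, slope), since both the
+    // x >= 0 and x < 0 branches are linear in x; with x = v * scale_in + bias the output
+    // scale therefore folds into the input scale and the bias before the activation.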
 
-                            intptr += 4;
-                            ptr0 += 1;
-                            ptr1 += 1;
-                            ptr2 += 1;
-                            ptr3 += 1;
-                        }
-                    }
-                }
-            }
+    float scale_in = scale_in_data[0];
+#if __mips_msa
+    v4f32 _scale_in0 = (v4f32)__msa_fill_w_f32(scale_in);
+    v4f32 _scale_in1 = _scale_in0;
+    if (scale_in_data_size > 1)
+    {
+        if (elempack == 8)
+        {
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = (v4f32)__msa_ld_w((const float*)scale_in_data + 4, 0);
+        }
+        if (elempack == 4)
+        {
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = _scale_in0;
         }
+    }
+#endif // __mips_msa
 
-        if (dims == 3)
+    float scale_out = scale_out_data[0];
+#if __mips_msa
+    v4f32 _scale_out0 = (v4f32)__msa_fill_w_f32(scale_out);
+    v4f32 _scale_out1 = _scale_out0;
+    if (scale_out_data_size > 1)
+    {
+        if (elempack == 8)
+        {
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = (v4f32)__msa_ld_w((const float*)scale_out_data + 4, 0);
+        }
+        if (elempack == 4)
         {
-            int w = bottom_blob.w;
-            int h = bottom_blob.h;
-            int channels = bottom_blob.c;
-            int size = w * h;
-            int out_elempack = opt.use_packing_layout && channels * elempack % 8 == 0 ? 8 : 1;
-            int outc = channels * elempack / out_elempack;
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = _scale_out0;
+        }
+    }
+#endif // __mips_msa
 
-            top_blob.create(w, h, outc, (size_t)out_elempack, out_elempack, opt.blob_allocator);
-            if (top_blob.empty())
-                return -100;
+    float scale = scale_in * scale_out;
+#if __mips_msa
+    v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
+    v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
+    v4f32 _slope = (v4f32)__msa_fill_w_f32(slope);
+#endif // __mips_msa
 
-            if (activation_type == 1)
+    if (bias_data_size == 0)
+    {
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmul_w(_v0, _scale0);
+            _v1 = __msa_fmul_w(_v1, _scale1);
+            *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
+            intptr += 8;
+            ptr += 8;
+        }
+        for (; i + 3 < size; i += 4)
+        {
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmul_w(_v, _scale0);
+            v16i8 v = float2int8leakyrelu(_v, _v, _slope);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
+        }
+#endif // __mips_msa
+        for (; i < size; i++)
+        {
+            float v = *intptr * scale;
+            *ptr = float2int8(v);
+            if (*ptr < 0) *ptr *= slope;
+            intptr++;
+            ptr++;
+        }
+    }
+    else
+    {
+        float bias = bias_data[0];
+#if __mips_msa
+        v4f32 _bias0 = (v4f32)__msa_fill_w_f32(bias);
+        v4f32 _bias1 = _bias0;
+        if (bias_data_size > 1)
+        {
+            if (elempack == 8)
             {
-                requantize_relu_pack4_msa(bottom_blob, top_blob, scale_in_data, scale_out_data, bias_data, opt);
-                return 0;
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = (v4f32)__msa_ld_w((const float*)bias_data + 4, 0);
             }
-
-            if (activation_type == 2 && activation_params[0] > 0.f)
+            if (elempack == 4)
             {
-                requantize_leakyrelu_pack4_msa(bottom_blob, top_blob, scale_in_data, scale_out_data, bias_data, activation_params[0], opt);
-                return 0;
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = _bias0;
             }
+        }
+#endif // __mips_msa
 
-            if (out_elempack == 8)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int q = 0; q < outc; q++)
-                    {
-                        const int* intptr0 = bottom_blob.channel(q * 2);
-                        const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                        signed char* ptr = top_blob.channel(q);
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-                        for (int i = 0; i < size; i++)
-                        {
-                            __builtin_prefetch(intptr0 + 16);
-                            __builtin_prefetch(intptr1 + 16);
-                            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                            _v0 = __msa_fmul_w(_v0, _scale_in0);
-                            _v1 = __msa_fmul_w(_v1, _scale_in1);
-                            _v0 = activation_ps(_v0, activation_type, activation_params);
-                            _v1 = activation_ps(_v1, activation_type, activation_params);
-                            _v0 = __msa_fmul_w(_v0, _scale_out0);
-                            _v1 = __msa_fmul_w(_v1, _scale_out1);
-                            *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                            intptr0 += 4;
-                            intptr1 += 4;
-                            ptr += 8;
-                        }
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int q = 0; q < outc; q++)
-                    {
-                        const int* intptr0 = bottom_blob.channel(q * 2);
-                        const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                        signed char* ptr = top_blob.channel(q);
-
-                        v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                        v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                        v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                        v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-                        v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-                        v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
-
-                        for (int i = 0; i < size; i++)
-                        {
-                            __builtin_prefetch(intptr0 + 16);
-                            __builtin_prefetch(intptr1 + 16);
-                            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                            _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
-                            _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
-                            _v0 = activation_ps(_v0, activation_type, activation_params);
-                            _v1 = activation_ps(_v1, activation_type, activation_params);
-                            _v0 = __msa_fmul_w(_v0, _scale_out0);
-                            _v1 = __msa_fmul_w(_v1, _scale_out1);
-                            *((int64_t*)ptr) = float2int8(_v0, _v1);
-
-                            intptr0 += 4;
-                            intptr1 += 4;
-                            ptr += 8;
-                        }
-                    }
-                }
-            }
-            if (out_elempack == 1)
-            {
-                if (bias_data_size == 0)
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int q = 0; q < channels; q++)
-                    {
-                        const int* intptr = bottom_blob.channel(q);
-                        signed char* ptr0 = top_blob.channel(q * 4);
-                        signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                        signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                        signed char* ptr3 = top_blob.channel(q * 4 + 3);
+        bias = bias * scale_out;
+#if __mips_msa
+        _bias0 = __msa_fmul_w(_bias0, _scale_out0);
+        _bias1 = __msa_fmul_w(_bias1, _scale_out1);
+#endif // __mips_msa
 
-                        v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                        v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
+            _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
+            *((int64_t*)ptr) = float2int8leakyrelu(_v0, _v1, _slope);
+            intptr += 8;
+            ptr += 8;
+        }
+        for (; i + 3 < size; i += 4)
+        {
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmadd_w(_bias0, _v, _scale0);
+            v16i8 v = float2int8leakyrelu(_v, _v, _slope);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
+        }
+#endif // __mips_msa
+        for (; i < size; i++)
+        {
+            float v = *intptr * scale + bias;
+            *ptr = float2int8(v);
+            if (*ptr < 0) *ptr *= slope;
+            intptr++;
+            ptr++;
+        }
+    }
+}
 
-                        for (int i = 0; i < size; i++)
-                        {
-                            __builtin_prefetch(intptr + 16);
-                            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                            _v = __msa_fmul_w(_v, _scale_in);
-                            _v = activation_ps(_v, activation_type, activation_params);
-                            _v = __msa_fmul_w(_v, _scale_out);
-                            v16i8 v = float2int8(_v);
-                            ptr0[0] = v[0];
-                            ptr1[0] = v[1];
-                            ptr2[0] = v[2];
-                            ptr3[0] = v[3];
+static void requantize(const int* intptr, signed char* ptr, const Mat& scale_in_data, const Mat& bias_data, const Mat& scale_out_data, int activation_type, const Mat& activation_params, int elemcount, int elempack)
+{
+    if (activation_type == 1)
+    {
+        requantize_relu(intptr, ptr, scale_in_data, bias_data, scale_out_data, elemcount, elempack);
+        return;
+    }
 
-                            intptr += 4;
-                            ptr0 += 1;
-                            ptr1 += 1;
-                            ptr2 += 1;
-                            ptr3 += 1;
-                        }
-                    }
-                }
-                else
-                {
-                    #pragma omp parallel for num_threads(opt.num_threads)
-                    for (int q = 0; q < channels; q++)
-                    {
-                        const int* intptr = bottom_blob.channel(q);
-                        signed char* ptr0 = top_blob.channel(q * 4);
-                        signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                        signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                        signed char* ptr3 = top_blob.channel(q * 4 + 3);
+    if (activation_type == 2 && activation_params[0] > 0.f)
+    {
+        const float slope = activation_params[0];
+        requantize_leakyrelu(intptr, ptr, scale_in_data, bias_data, scale_out_data, slope, elemcount, elempack);
+        return;
+    }
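+    // other activation types take the generic path below: dequantize with scale_in (plus
+    // bias), run activation_ps / activation_ss, then quantize with scale_out, without a
+    // fused shortcut.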
 
-                        v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                        v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
-                        v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 4, 0);
+    const int scale_in_data_size = scale_in_data.w;
+    const int bias_data_size = bias_data.w;
+    const int scale_out_data_size = scale_out_data.w;
+    const int size = elemcount * elempack;
 
-                        for (int i = 0; i < size; i++)
-                        {
-                            __builtin_prefetch(intptr + 16);
-                            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                            _v = __msa_fmadd_w(_bias, _v, _scale_in);
-                            _v = activation_ps(_v, activation_type, activation_params);
-                            _v = __msa_fmul_w(_v, _scale_out);
-                            v16i8 v = float2int8(_v);
-                            ptr0[0] = v[0];
-                            ptr1[0] = v[1];
-                            ptr2[0] = v[2];
-                            ptr3[0] = v[3];
+    // NCNN_LOGE("requantize %d %d %d   %d %d", scale_in_data_size, bias_data_size, scale_out_data_size, elemcount, elempack);
 
-                            intptr += 4;
-                            ptr0 += 1;
-                            ptr1 += 1;
-                            ptr2 += 1;
-                            ptr3 += 1;
-                        }
-                    }
-                }
-            }
+    float scale_in = scale_in_data[0];
+#if __mips_msa
+    v4f32 _scale_in0 = (v4f32)__msa_fill_w_f32(scale_in);
+    v4f32 _scale_in1 = _scale_in0;
+    if (scale_in_data_size > 1)
+    {
+        if (elempack == 8)
+        {
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = (v4f32)__msa_ld_w((const float*)scale_in_data + 4, 0);
+        }
+        if (elempack == 4)
+        {
+            _scale_in0 = (v4f32)__msa_ld_w((const float*)scale_in_data, 0);
+            _scale_in1 = _scale_in0;
         }
-
-        return 0;
     }
 #endif // __mips_msa
 
-    if (dims == 1)
+    float scale_out = scale_out_data[0];
+#if __mips_msa
+    v4f32 _scale_out0 = (v4f32)__msa_fill_w_f32(scale_out);
+    v4f32 _scale_out1 = _scale_out0;
+    if (scale_out_data_size > 1)
     {
-        int w = bottom_blob.w;
-
-        top_blob.create(w, (size_t)1u, opt.blob_allocator);
-        if (top_blob.empty())
-            return -100;
-
-        const int* intptr = bottom_blob;
-        signed char* ptr = top_blob;
-
-        if (scale_in_data_size == 1 && scale_out_data_size == 1)
+        if (elempack == 8)
         {
-            const float scale_in = scale_in_data[0];
-            const float scale_out = scale_out_data[0];
-
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
-            else if (bias_data_size == 1)
-            {
-                const float bias = bias_data[0];
-
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in + bias;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
-            else
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in + bias_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = (v4f32)__msa_ld_w((const float*)scale_out_data + 4, 0);
         }
-        else if (scale_in_data_size == 1 && scale_out_data_size > 1)
+        if (elempack == 4)
         {
-            const float scale_in = scale_in_data[0];
-
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
-            }
-            else if (bias_data_size == 1)
-            {
-                const float bias = bias_data[0];
+            _scale_out0 = (v4f32)__msa_ld_w((const float*)scale_out_data, 0);
+            _scale_out1 = _scale_out0;
+        }
+    }
+#endif // __mips_msa
 
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in + bias;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
-            }
-            else
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in + bias_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
-            }
+    if (bias_data_size == 0)
+    {
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmul_w(_v0, _scale_in0);
+            _v1 = __msa_fmul_w(_v1, _scale_in1);
+            _v0 = activation_ps(_v0, activation_type, activation_params);
+            _v1 = activation_ps(_v1, activation_type, activation_params);
+            _v0 = __msa_fmul_w(_v0, _scale_out0);
+            _v1 = __msa_fmul_w(_v1, _scale_out1);
+            *((int64_t*)ptr) = float2int8(_v0, _v1);
+            intptr += 8;
+            ptr += 8;
         }
-        else if (scale_in_data_size > 1 && scale_out_data_size == 1)
+        for (; i + 3 < size; i += 4)
         {
-            const float scale_out = scale_out_data[0];
-
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
-            else if (bias_data_size == 1)
-            {
-                const float bias = bias_data[0];
-
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i] + bias;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
-            else
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i] + bias_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmul_w(_v, _scale_in0);
+            _v = activation_ps(_v, activation_type, activation_params);
+            _v = __msa_fmul_w(_v, _scale_out0);
+            v16i8 v = float2int8(_v, _v);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
         }
-        else // if (scale_in_data_size > 1 && scale_out_data_size > 1)
+#endif // __mips_msa
+        for (; i < size; i++)
         {
-            if (bias_data_size == 0)
-            {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
-            }
-            else if (bias_data_size == 1)
+            float v = *intptr * scale_in;
+            v = activation_ss(v, activation_type, activation_params);
+            *ptr = float2int8(v * scale_out);
+            intptr++;
+            ptr++;
+        }
+    }
+    else
+    {
+        float bias = bias_data[0];
+#if __mips_msa
+        v4f32 _bias0 = (v4f32)__msa_fill_w_f32(bias);
+        v4f32 _bias1 = _bias0;
+        if (bias_data_size > 1)
+        {
+            if (elempack == 8)
             {
-                const float bias = bias_data[0];
-
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i] + bias;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = (v4f32)__msa_ld_w((const float*)bias_data + 4, 0);
             }
-            else
+            if (elempack == 4)
             {
-                #pragma omp parallel for num_threads(opt.num_threads)
-                for (int i = 0; i < w; i++)
-                {
-                    float v = intptr[i] * scale_in_data[i] + bias_data[i];
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out_data[i]);
-                }
+                _bias0 = (v4f32)__msa_ld_w((const float*)bias_data, 0);
+                _bias1 = _bias0;
             }
         }
+#endif // __mips_msa
+
+        int i = 0;
+#if __mips_msa
+        for (; i + 7 < size; i += 8)
+        {
+            __builtin_prefetch(intptr + 32);
+            v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
+            _v0 = __msa_fmadd_w(_bias0, _v0, _scale_in0);
+            _v1 = __msa_fmadd_w(_bias1, _v1, _scale_in1);
+            _v0 = activation_ps(_v0, activation_type, activation_params);
+            _v1 = activation_ps(_v1, activation_type, activation_params);
+            _v0 = __msa_fmul_w(_v0, _scale_out0);
+            _v1 = __msa_fmul_w(_v1, _scale_out1);
+            *((int64_t*)ptr) = float2int8(_v0, _v1);
+            intptr += 8;
+            ptr += 8;
+        }
+        for (; i + 3 < size; i += 4)
+        {
+            v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
+            _v = __msa_fmadd_w(_bias0, _v, _scale_in0);
+            _v = activation_ps(_v, activation_type, activation_params);
+            _v = __msa_fmul_w(_v, _scale_out0);
+            v16i8 v = float2int8(_v, _v);
+            ptr[0] = v[0];
+            ptr[1] = v[1];
+            ptr[2] = v[2];
+            ptr[3] = v[3];
+            intptr += 4;
+            ptr += 4;
+        }
+#endif // __mips_msa
+        for (; i < size; i++)
+        {
+            float v = *intptr * scale_in + bias;
+            v = activation_ss(v, activation_type, activation_params);
+            *ptr = float2int8(v * scale_out);
+            intptr++;
+            ptr++;
+        }
     }
+}
 
-    if (dims == 2)
-    {
-        int w = bottom_blob.w;
-        int h = bottom_blob.h;
+int Requantize_mips::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
+{
+    const int dims = bottom_blob.dims;
+    const int w = bottom_blob.w;
+    const int h = bottom_blob.h;
+    const int channels = bottom_blob.c;
+    const int elempack = bottom_blob.elempack;
+    const size_t out_elemsize = elempack * 1u;
 
-        top_blob.create(w, h, (size_t)1u, opt.blob_allocator);
+    if (dims == 1)
+    {
+        top_blob.create(w, out_elemsize, elempack, opt.blob_allocator);
         if (top_blob.empty())
             return -100;
 
-        if (bias_data_size == 0)
+        const int wp = std::max(1, w / opt.num_threads);
+        const int nn_w = (w + wp - 1) / wp;
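+        // note: the 1-D blob is split into nn_w contiguous spans of at most wp elements so
+        // each OpenMP iteration requantizes one span; e.g. w = 1000 with opt.num_threads = 3
+        // gives wp = 333 and nn_w = 4, the last span covering a single element via the
+        // std::min below.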
+
+        #pragma omp parallel for num_threads(opt.num_threads)
+        for (int ii = 0; ii < nn_w; ii++)
         {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int i = 0; i < h; i++)
-            {
-                const int* intptr = bottom_blob.row<const int>(i);
-                signed char* ptr = top_blob.row<signed char>(i);
+            const int i = ii * wp;
 
-                const float scale_in = scale_in_data_size == 1 ? scale_in_data[0] : scale_in_data[i];
-                const float scale_out = scale_out_data_size == 1 ? scale_out_data[0] : scale_out_data[i];
+            const int* intptr = (const int*)bottom_blob + i * elempack;
+            signed char* ptr = (signed char*)top_blob + i * elempack;
 
-                for (int j = 0; j < w; j++)
-                {
-                    float v = intptr[j] * scale_in;
-                    ptr[j] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
-        }
-        else
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int i = 0; i < h; i++)
-            {
-                const int* intptr = bottom_blob.row<const int>(i);
-                signed char* ptr = top_blob.row<signed char>(i);
+            // assert scale_in_data_size == 1
+            // assert bias_data_size == 0 || bias_data_size == 1
+            // assert scale_out_data_size == 1
 
-                const float scale_in = scale_in_data_size == 1 ? scale_in_data[0] : scale_in_data[i];
-                const float scale_out = scale_out_data_size == 1 ? scale_out_data[0] : scale_out_data[i];
-                const float bias = bias_data_size == 1 ? bias_data[0] : bias_data[i];
+            const int size = std::min(w - i, wp) * elempack;
 
-                for (int j = 0; j < w; j++)
-                {
-                    float v = intptr[j] * scale_in + bias;
-                    ptr[j] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
+            requantize(intptr, ptr, scale_in_data, bias_data, scale_out_data, activation_type, activation_params, size, 1);
         }
     }
 
-    if (dims == 3)
+    if (dims == 2)
     {
-        int w = bottom_blob.w;
-        int h = bottom_blob.h;
-        int channels = bottom_blob.c;
-        int size = w * h;
-
-        top_blob.create(w, h, channels, (size_t)1u, opt.blob_allocator);
+        top_blob.create(w, h, out_elemsize, elempack, opt.blob_allocator);
         if (top_blob.empty())
             return -100;
 
-        if (bias_data_size == 0)
+        #pragma omp parallel for num_threads(opt.num_threads)
+        for (int i = 0; i < h; i++)
         {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr = top_blob.channel(q);
+            const int* intptr = bottom_blob.row<const int>(i);
+            signed char* ptr = top_blob.row<signed char>(i);
 
-                const float scale_in = scale_in_data_size == 1 ? scale_in_data[0] : scale_in_data[q];
-                const float scale_out = scale_out_data_size == 1 ? scale_out_data[0] : scale_out_data[q];
+            const Mat scale_in_data_i = scale_in_data_size > 1 ? scale_in_data.range(i * elempack, elempack) : scale_in_data;
+            const Mat bias_data_i = bias_data_size > 1 ? bias_data.range(i * elempack, elempack) : bias_data;
+            const Mat scale_out_data_i = scale_out_data_size > 1 ? scale_out_data.range(i * elempack, elempack) : scale_out_data;
 
-                for (int i = 0; i < size; i++)
-                {
-                    float v = intptr[i] * scale_in;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
+            requantize(intptr, ptr, scale_in_data_i, bias_data_i, scale_out_data_i, activation_type, activation_params, w, elempack);
         }
-        else
+    }
+
+    if (dims == 3)
+    {
+        top_blob.create(w, h, channels, out_elemsize, elempack, opt.blob_allocator);
+        if (top_blob.empty())
+            return -100;
+
+        #pragma omp parallel for num_threads(opt.num_threads)
+        for (int q = 0; q < channels; q++)
         {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr = top_blob.channel(q);
+            const int* intptr = bottom_blob.channel(q);
+            signed char* ptr = top_blob.channel(q);
 
-                const float scale_in = scale_in_data_size == 1 ? scale_in_data[0] : scale_in_data[q];
-                const float scale_out = scale_out_data_size == 1 ? scale_out_data[0] : scale_out_data[q];
-                const float bias = bias_data_size == 1 ? bias_data[0] : bias_data[q];
+            const Mat scale_in_data_q = scale_in_data_size > 1 ? scale_in_data.range(q * elempack, elempack) : scale_in_data;
+            const Mat bias_data_q = bias_data_size > 1 ? bias_data.range(q * elempack, elempack) : bias_data;
+            const Mat scale_out_data_q = scale_out_data_size > 1 ? scale_out_data.range(q * elempack, elempack) : scale_out_data;
 
-                for (int i = 0; i < size; i++)
-                {
-                    float v = intptr[i] * scale_in + bias;
-                    ptr[i] = float2int8(activation_ss(v, activation_type, activation_params) * scale_out);
-                }
-            }
+            requantize(intptr, ptr, scale_in_data_q, bias_data_q, scale_out_data_q, activation_type, activation_params, w * h, elempack);
         }
     }
 
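The replacement 1-D path above does not walk elements directly; it slices the row into per-thread chunks of wp elements and hands each chunk to the shared requantize() helper. A minimal standalone sketch of that partition arithmetic (hypothetical values for w and num_threads, not ncnn code):

#include <algorithm>
#include <cstdio>

int main()
{
    const int w = 10;           // hypothetical element count
    const int num_threads = 4;  // hypothetical thread count

    const int wp = std::max(1, w / num_threads); // elements per slice, at least 1
    const int nn_w = (w + wp - 1) / wp;          // number of slices (rounded up)

    for (int ii = 0; ii < nn_w; ii++)
    {
        const int i = ii * wp;                   // slice start
        const int len = std::min(w - i, wp);     // last slice may be shorter
        printf("slice %d: [%d, %d)\n", ii, i, i + len);
    }
    return 0;
}

With w = 10 and num_threads = 4 this prints five slices [0,2) [2,4) [4,6) [6,8) [8,10); OpenMP then spreads the nn_w slices over the available threads.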
diff --git a/src/layer/mips/requantize_relu_pack4.h b/src/layer/mips/requantize_relu_pack4.h
deleted file mode 100644
index e43797bd8b8..00000000000
--- a/src/layer/mips/requantize_relu_pack4.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Tencent is pleased to support the open source community by making ncnn available.
-//
-// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
-//
-// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// https://opensource.org/licenses/BSD-3-Clause
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-static void requantize_relu_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
-{
-    int w = bottom_blob.w;
-    int h = bottom_blob.h;
-    int channels = bottom_blob.c;
-    int size = w * h;
-    int outc = top_blob.c;
-    int out_elempack = top_blob.elempack;
-
-    int scale_in_data_size = scale_in_data.w;
-    int scale_out_data_size = scale_out_data.w;
-    int bias_data_size = bias_data.w;
-
-    // int8(relu(v * scale_in) * scale_out)
-    // int8_relu(v * (scale_in * scale_out))
-
-    // int8(relu(v * scale_in + bias) * scale_out)
-    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))
-
-    if (out_elempack == 8)
-    {
-        if (bias_data_size == 0)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < outc; q++)
-            {
-                const int* intptr0 = bottom_blob.channel(q * 2);
-                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                signed char* ptr = top_blob.channel(q);
-
-                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-
-                int i = 0;
-                for (; i + 3 < size; i += 4)
-                {
-                    __builtin_prefetch(intptr0 + 64);
-                    __builtin_prefetch(intptr1 + 64);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
-                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
-                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
-                    _v00 = __msa_fmul_w(_v00, _scale0);
-                    _v01 = __msa_fmul_w(_v01, _scale0);
-                    _v02 = __msa_fmul_w(_v02, _scale0);
-                    _v03 = __msa_fmul_w(_v03, _scale0);
-                    _v10 = __msa_fmul_w(_v10, _scale1);
-                    _v11 = __msa_fmul_w(_v11, _scale1);
-                    _v12 = __msa_fmul_w(_v12, _scale1);
-                    _v13 = __msa_fmul_w(_v13, _scale1);
-                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
-                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);
-                    *((int64_t*)(ptr + 16)) = float2int8relu(_v02, _v12);
-                    *((int64_t*)(ptr + 24)) = float2int8relu(_v03, _v13);
-
-                    intptr0 += 16;
-                    intptr1 += 16;
-                    ptr += 32;
-                }
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr0 + 16);
-                    __builtin_prefetch(intptr1 + 16);
-                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    _v0 = __msa_fmul_w(_v0, _scale0);
-                    _v1 = __msa_fmul_w(_v1, _scale1);
-                    *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-
-                    intptr0 += 4;
-                    intptr1 += 4;
-                    ptr += 8;
-                }
-            }
-        }
-        else
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < outc; q++)
-            {
-                const int* intptr0 = bottom_blob.channel(q * 2);
-                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
-                signed char* ptr = top_blob.channel(q);
-
-                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-                v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-                v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
-
-                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-                _bias0 = __msa_fmul_w(_bias0, _scale_out0);
-                _bias1 = __msa_fmul_w(_bias1, _scale_out1);
-
-                int i = 0;
-                for (; i + 3 < size; i += 4)
-                {
-                    __builtin_prefetch(intptr0 + 64);
-                    __builtin_prefetch(intptr1 + 64);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
-                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
-                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
-                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
-                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
-                    _v02 = __msa_fmadd_w(_bias0, _v02, _scale0);
-                    _v03 = __msa_fmadd_w(_bias0, _v03, _scale0);
-                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
-                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
-                    _v12 = __msa_fmadd_w(_bias1, _v12, _scale1);
-                    _v13 = __msa_fmadd_w(_bias1, _v13, _scale1);
-                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
-                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);
-                    *((int64_t*)(ptr + 16)) = float2int8relu(_v02, _v12);
-                    *((int64_t*)(ptr + 24)) = float2int8relu(_v03, _v13);
-
-                    intptr0 += 16;
-                    intptr1 += 16;
-                    ptr += 32;
-                }
-                for (; i + 1 < size; i += 2)
-                {
-                    __builtin_prefetch(intptr0 + 32);
-                    __builtin_prefetch(intptr1 + 32);
-                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
-                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
-                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
-                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
-                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
-                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
-                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
-                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);
-
-                    intptr0 += 8;
-                    intptr1 += 8;
-                    ptr += 16;
-                }
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr0 + 16);
-                    __builtin_prefetch(intptr1 + 16);
-                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
-                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
-                    _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                    _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                    *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-
-                    intptr0 += 4;
-                    intptr1 += 4;
-                    ptr += 8;
-                }
-            }
-        }
-    }
-    if (out_elempack == 1)
-    {
-        if (bias_data_size == 0)
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 4);
-                signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                signed char* ptr3 = top_blob.channel(q * 4 + 3);
-
-                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
-
-                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
-
-                int i = 0;
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr + 16);
-                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                    _v = __msa_fmul_w(_v, _scale);
-                    v16i8 v = float2int8relu(_v);
-                    ptr0[0] = v[0];
-                    ptr1[0] = v[1];
-                    ptr2[0] = v[2];
-                    ptr3[0] = v[3];
-
-                    intptr += 4;
-                    ptr0 += 1;
-                    ptr1 += 1;
-                    ptr2 += 1;
-                    ptr3 += 1;
-                }
-            }
-        }
-        else
-        {
-            #pragma omp parallel for num_threads(opt.num_threads)
-            for (int q = 0; q < channels; q++)
-            {
-                const int* intptr = bottom_blob.channel(q);
-                signed char* ptr0 = top_blob.channel(q * 4);
-                signed char* ptr1 = top_blob.channel(q * 4 + 1);
-                signed char* ptr2 = top_blob.channel(q * 4 + 2);
-                signed char* ptr3 = top_blob.channel(q * 4 + 3);
-
-                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
-                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
-                v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 4, 0);
-
-                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
-                _bias = __msa_fmul_w(_bias, _scale_out);
-
-                int i = 0;
-                for (; i < size; i++)
-                {
-                    __builtin_prefetch(intptr + 16);
-                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                    _v = __msa_fmadd_w(_bias, _v, _scale);
-                    v16i8 v = float2int8relu(_v);
-                    ptr0[0] = v[0];
-                    ptr1[0] = v[1];
-                    ptr2[0] = v[2];
-                    ptr3[0] = v[3];
-
-                    intptr += 4;
-                    ptr0 += 1;
-                    ptr1 += 1;
-                    ptr2 += 1;
-                    ptr3 += 1;
-                }
-            }
-        }
-    }
-}
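The deleted pack4 kernel relies on the folding spelled out in its header comments: assuming scale_out >= 0 (true for quantization scales), relu(x) * scale_out == relu(x * scale_out), so the output scale can be folded into the input scale and the bias once per channel, leaving one fused multiply-add plus a relu-quantize per element. A scalar sketch of that identity (int8_relu_requant is a hypothetical helper, assuming round-to-nearest and ncnn-style saturation to [-127, 127]; it is not the actual ncnn routine):

#include <algorithm>
#include <cmath>

static signed char int8_relu_requant(int v, float scale_in, float bias, float scale_out)
{
    const float scale = scale_in * scale_out;   // folded once per channel
    const float bias_s = bias * scale_out;      // folded once per channel
    float f = v * scale + bias_s;               // one fused multiply-add in the vector code
    f = std::max(f, 0.f);                       // relu after folding gives the same result
    int q = (int)lroundf(f);
    return (signed char)std::min(std::max(q, -127), 127);
}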
diff --git a/src/layer/mips/requantize_relu_pack8.h b/src/layer/mips/requantize_relu_pack8.h
deleted file mode 100644
index 824b668cb73..00000000000
--- a/src/layer/mips/requantize_relu_pack8.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// Tencent is pleased to support the open source community by making ncnn available.
-//
-// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
-//
-// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
-// in compliance with the License. You may obtain a copy of the License at
-//
-// https://opensource.org/licenses/BSD-3-Clause
-//
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-static void requantize_relu_pack8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
-{
-    int w = bottom_blob.w;
-    int h = bottom_blob.h;
-    int channels = bottom_blob.c;
-    int size = w * h;
-
-    int scale_in_data_size = scale_in_data.w;
-    int scale_out_data_size = scale_out_data.w;
-    int bias_data_size = bias_data.w;
-
-    // int8(relu(v * scale_in) * scale_out)
-    // int8_relu(v * (scale_in * scale_out))
-
-    // int8(relu(v * scale_in + bias) * scale_out)
-    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))
-
-    if (bias_data_size == 0)
-    {
-        #pragma omp parallel for num_threads(opt.num_threads)
-        for (int q = 0; q < channels; q++)
-        {
-            const int* intptr = bottom_blob.channel(q);
-            signed char* ptr = top_blob.channel(q);
-
-            v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-            v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-            v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-            v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-
-            v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-            v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-
-            int i = 0;
-            for (; i + 3 < size; i += 4)
-            {
-                __builtin_prefetch(intptr + 128);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0));
-                v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0));
-                v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0));
-                v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                _v2 = __msa_fmul_w(_v2, _scale0);
-                _v3 = __msa_fmul_w(_v3, _scale1);
-                _v4 = __msa_fmul_w(_v4, _scale0);
-                _v5 = __msa_fmul_w(_v5, _scale1);
-                _v6 = __msa_fmul_w(_v6, _scale0);
-                _v7 = __msa_fmul_w(_v7, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-                *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3);
-                *((int64_t*)(ptr + 16)) = float2int8relu(_v4, _v5);
-                *((int64_t*)(ptr + 24)) = float2int8relu(_v6, _v7);
-
-                intptr += 32;
-                ptr += 32;
-            }
-            for (; i + 1 < size; i += 2)
-            {
-                __builtin_prefetch(intptr + 64);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                _v2 = __msa_fmul_w(_v2, _scale0);
-                _v3 = __msa_fmul_w(_v3, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-                *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3);
-
-                intptr += 16;
-                ptr += 16;
-            }
-            for (; i < size; i++)
-            {
-                __builtin_prefetch(intptr + 32);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                _v0 = __msa_fmul_w(_v0, _scale0);
-                _v1 = __msa_fmul_w(_v1, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-
-                intptr += 8;
-                ptr += 8;
-            }
-        }
-    }
-    else
-    {
-        #pragma omp parallel for num_threads(opt.num_threads)
-        for (int q = 0; q < channels; q++)
-        {
-            const int* intptr = bottom_blob.channel(q);
-            signed char* ptr = top_blob.channel(q);
-
-            v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
-            v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
-            v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
-            v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
-            v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
-            v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);
-
-            v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
-            v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
-            _bias0 = __msa_fmul_w(_bias0, _scale_out0);
-            _bias1 = __msa_fmul_w(_bias1, _scale_out1);
-
-            int i = 0;
-            for (; i + 3 < size; i += 4)
-            {
-                __builtin_prefetch(intptr + 128);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                v4f32 _v4 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 16, 0));
-                v4f32 _v5 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 20, 0));
-                v4f32 _v6 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 24, 0));
-                v4f32 _v7 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 28, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                _v2 = __msa_fmadd_w(_bias0, _v2, _scale0);
-                _v3 = __msa_fmadd_w(_bias1, _v3, _scale1);
-                _v4 = __msa_fmadd_w(_bias0, _v4, _scale0);
-                _v5 = __msa_fmadd_w(_bias1, _v5, _scale1);
-                _v6 = __msa_fmadd_w(_bias0, _v6, _scale0);
-                _v7 = __msa_fmadd_w(_bias1, _v7, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-                *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3);
-                *((int64_t*)(ptr + 16)) = float2int8relu(_v4, _v5);
-                *((int64_t*)(ptr + 24)) = float2int8relu(_v6, _v7);
-
-                intptr += 32;
-                ptr += 32;
-            }
-            for (; i + 1 < size; i += 2)
-            {
-                __builtin_prefetch(intptr + 64);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                v4f32 _v2 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 8, 0));
-                v4f32 _v3 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 12, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                _v2 = __msa_fmadd_w(_bias0, _v2, _scale0);
-                _v3 = __msa_fmadd_w(_bias1, _v3, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-                *((int64_t*)(ptr + 8)) = float2int8relu(_v2, _v3);
-
-                intptr += 16;
-                ptr += 16;
-            }
-            for (; i < size; i++)
-            {
-                __builtin_prefetch(intptr + 32);
-                v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
-                v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr + 4, 0));
-                _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
-                _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
-                *((int64_t*)ptr) = float2int8relu(_v0, _v1);
-
-                intptr += 8;
-                ptr += 8;
-            }
-        }
-    }
-}
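The pack8 variant deleted here applies the same folding but keeps each channel as eight int32 lanes, handled as two 4-lane MSA vectors, and unrolls the inner loop by 4, 2, then 1 pixels so the tail never overruns size. A scalar sketch of one pack8 pixel under the same assumptions (requant_relu_pack8_pixel is a hypothetical stand-in; scale and bias hold the pre-folded per-channel values):

#include <algorithm>
#include <cmath>

static void requant_relu_pack8_pixel(const int* intptr, signed char* ptr,
                                     const float scale[8], const float bias[8])
{
    for (int k = 0; k < 8; k++)
    {
        float f = intptr[k] * scale[k] + bias[k]; // scale = scale_in * scale_out, bias = bias * scale_out
        f = std::max(f, 0.f);                     // relu
        int q = (int)lroundf(f);
        ptr[k] = (signed char)std::min(q, 127);   // non-negative after relu, so only the top needs clamping
    }
}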