
Commit dc0bce9

qkeras-robot authored and copybara-github committed
No public description
PiperOrigin-RevId: 601178210
Change-Id: Ic49845aa14378fb1dcac5f06476f1d61a04c498f
1 parent c5051b5 commit dc0bce9

File tree: 1 file changed (+63, -0 lines)


qkeras/qmodel.proto (+63 lines)
@@ -0,0 +1,63 @@
// Copyright 2019 Google LLC
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ==============================================================================
syntax = "proto2";

package qkeras;

import "google/protobuf/any.proto";

// Protobuf to represent a quantized machine learning model.
message QModel {
  // Layers of a quantized model.
  repeated QLayer qlayers = 1;
}

// Protobuf to represent an individual layer that supports quantization.
//
// TODO(akshayap): Add platform agnostic way of saving weights, ideally
// something that can mimic numpy arrays.
message QLayer {
  // Layer name.
  optional string name = 1;
  // Input shape for the layer.
  repeated int32 input_shape = 2 [packed = true];
  // Output shape for the layer.
  repeated int32 output_shape = 3 [packed = true];
  // Quantization configuration for this layer.
  optional Quantization quantization = 4;
  // Hardware parameters associated with this layer.
  optional HardwareParams hw_params = 5;
  // Model specific custom details.
  optional google.protobuf.Any details = 6;
}

// Quantization configurations for a model layer.
message Quantization {
  // Number of bits to perform quantization.
  optional int32 bits = 1;
  // Number of bits to the left of the decimal point.
  optional int32 integer = 2;
  // The minimum allowed power-of-two exponent.
  optional int32 min_po2 = 3;
  // The maximum allowed power-of-two exponent.
  optional int32 max_po2 = 4;
}

// Parameters for hardware synthesis of machine learning models.
message HardwareParams {
  // MAC bitwidth.
  optional int32 mac_bitwidth = 1;
}
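
For context, a minimal sketch of how the Python bindings generated from this proto could be used. It assumes the file has been compiled with protoc into a qmodel_pb2 module (standard protoc naming); the layer name, shapes, and bit widths below are illustrative values, not part of the commit.

# Hypothetical sketch: assumes `protoc --python_out=. qkeras/qmodel.proto`
# has produced a qmodel_pb2 module; names and values are illustrative.
import qmodel_pb2

model = qmodel_pb2.QModel()

# Append one QLayer to the repeated `qlayers` field.
layer = model.qlayers.add()
layer.name = "qdense_0"                # illustrative layer name
layer.input_shape.extend([1, 16])      # packed repeated int32
layer.output_shape.extend([1, 8])

# Nested optional messages are populated by assigning their subfields.
layer.quantization.bits = 8
layer.quantization.integer = 0
layer.hw_params.mac_bitwidth = 16

# Round-trip through the wire format.
data = model.SerializeToString()
restored = qmodel_pb2.QModel.FromString(data)
assert restored.qlayers[0].quantization.bits == 8

The `details` field is a google.protobuf.Any, so model-specific payloads would be attached with the standard Any API (e.g. layer.details.Pack(some_message)), keeping the core schema unchanged.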
