-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathcommon.proto
500 lines (462 loc) · 19.8 KB
/
common.proto
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package common.v2;
import "validate/validate.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
option go_package = "d7y.io/api/v2/pkg/apis/common/v2;common";
// SizeScope classifies a task by its content size, which determines how the
// task content is transferred (multiple pieces, a single piece, or inline).
enum SizeScope {
// NORMAL task has more than one piece.
NORMAL = 0;
// SMALL task's content length is more than 128 bytes and it has only one piece.
SMALL = 1;
// TINY task's content length is less than 128 bytes.
TINY = 2;
// EMPTY task's content length is equal to zero.
EMPTY = 3;
// UNKNOW task has an invalid size scope.
// NOTE(review): the name is a typo of "UNKNOWN"; it is kept as-is because
// renaming an enum value breaks generated code in every target language.
UNKNOW = 4;
}
// TaskType represents the type of a task.
enum TaskType {
// STANDARD is the standard type of task; it can download from the source, a remote
// peer and the local peer(local cache). When the standard task has never been
// downloaded in the P2P cluster, dfdaemon will download the task from the source.
// When the standard task has already been downloaded in the P2P cluster, dfdaemon
// will download the task from a remote peer or the local peer(local cache).
STANDARD = 0;
// PERSISTENT is the persistent type of task; it can import a file into and export
// a file from the P2P cluster. When the persistent task is imported into the P2P
// cluster, dfdaemon will store the task on the peer's disk and copy multiple
// replicas to remote peers to prevent data loss.
PERSISTENT = 1;
// PERSISTENT_CACHE is the persistent cache type of task; it can import a file into
// and export a file from the P2P cluster. When the persistent cache task is imported
// into the P2P cluster, dfdaemon will store the task on the peer's disk and copy
// multiple replicas to remote peers to prevent data loss. When the expiration time
// is reached, the task will be deleted from the P2P cluster.
PERSISTENT_CACHE = 2;
}
// TrafficType represents where download traffic comes from.
enum TrafficType {
// BACK_TO_SOURCE is traffic downloaded from the source.
BACK_TO_SOURCE = 0;
// REMOTE_PEER is traffic downloaded from a remote peer.
REMOTE_PEER = 1;
// LOCAL_PEER is traffic downloaded from the local peer.
LOCAL_PEER = 2;
}
// Priority represents the download priority of an application. Higher levels
// trigger progressively stronger back-to-source behavior on first download.
enum Priority {
// LEVEL0 has no special meaning for the scheduler.
LEVEL0 = 0;
// LEVEL1 represents that the download task is forbidden,
// and an error code is returned during registration.
LEVEL1 = 1;
// LEVEL2 represents that when the task is downloaded for the first time,
// peers are allowed to download from other peers,
// but not back-to-source. When the task is not being downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents that when the task is downloaded for the first time,
// a normal peer is the first to download back-to-source.
// When the task is not being downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents that when the task is downloaded for the first time,
// a weak peer is first triggered to go back-to-source.
// When the task is not being downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents that when the task is downloaded for the first time,
// a strong peer is first triggered to go back-to-source.
// When the task is not being downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents that when the task is downloaded for the first time,
// a super peer is first triggered to go back-to-source.
// When the task is not being downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// Peer metadata: one participant's view of a task download.
message Peer {
// Peer id.
string id = 1 [(validate.rules).string.min_len = 1];
// Range is the url range of the request, when the peer downloads only part of the content.
optional Range range = 2;
// Peer priority.
Priority priority = 3 [(validate.rules).enum.defined_only = true];
// Pieces held by this peer. If present, must contain at least one piece.
repeated Piece pieces = 4 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
// Time the peer's download cost.
google.protobuf.Duration cost = 5 [(validate.rules).duration.required = true];
// Peer state (state-machine name as a string, e.g. its FSM state).
string state = 6 [(validate.rules).string.min_len = 1];
// Task the peer is downloading.
Task task = 7 [(validate.rules).message.required = true];
// Host the peer runs on.
Host host = 8 [(validate.rules).message.required = true];
// NeedBackToSource indicates the peer needs to download from the source.
bool need_back_to_source = 9;
// Peer create time.
google.protobuf.Timestamp created_at = 10 [(validate.rules).timestamp.required = true];
// Peer update time.
google.protobuf.Timestamp updated_at = 11 [(validate.rules).timestamp.required = true];
}
// PersistentCachePeer metadata: a peer participating in a persistent cache task.
message PersistentCachePeer {
// Peer id.
string id = 1 [(validate.rules).string.min_len = 1];
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, it will not be deleted when
// dfdaemon runs garbage collection; it is only deleted when the task is
// deleted by the user.
bool persistent = 2;
// Time the peer's download cost.
google.protobuf.Duration cost = 3 [(validate.rules).duration.required = true];
// Peer state.
string state = 4 [(validate.rules).string.min_len = 1];
// Persistent cache task the peer belongs to.
PersistentCacheTask task = 5 [(validate.rules).message.required = true];
// Host the peer runs on.
Host host = 6 [(validate.rules).message.required = true];
// Peer create time.
google.protobuf.Timestamp created_at = 7 [(validate.rules).timestamp.required = true];
// Peer update time.
google.protobuf.Timestamp updated_at = 8 [(validate.rules).timestamp.required = true];
}
// Task metadata: a downloadable resource tracked by the P2P cluster.
message Task {
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
// Task type.
TaskType type = 2 [(validate.rules).enum.defined_only = true];
// Download url.
string url = 3 [(validate.rules).string.uri = true];
// Digest of the task content, for example blake3:xxx or sha256:yyy.
optional string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// URL tag identifies different tasks for the same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 7;
// Task request headers.
map<string, string> request_header = 8;
// Task piece length, in bytes. Must be at least 1.
uint64 piece_length = 9 [(validate.rules).uint64.gte = 1];
// Task content length, in bytes.
uint64 content_length = 10;
// Task piece count.
uint32 piece_count = 11;
// Task size scope.
SizeScope size_scope = 12;
// Pieces of the task. If present, must contain at least one piece.
repeated Piece pieces = 13 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
// Task state.
string state = 14 [(validate.rules).string.min_len = 1];
// Task peer count.
uint32 peer_count = 15;
// Whether the task has at least one available peer to download from.
bool has_available_peer = 16;
// Task create time.
google.protobuf.Timestamp created_at = 17 [(validate.rules).timestamp.required = true];
// Task update time.
google.protobuf.Timestamp updated_at = 18 [(validate.rules).timestamp.required = true];
}
// PersistentCacheTask metadata: a cache task whose persistent replicas survive
// dfdaemon garbage collection until the user deletes the task.
message PersistentCacheTask {
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
// Desired number of persistent replicas. Persistent replicas are not deleted
// when dfdaemon runs garbage collection; they are only deleted when the task
// is deleted by the user.
uint64 persistent_replica_count = 2 [(validate.rules).uint64.gte = 1];
// Current number of persistent replicas in the cluster. Persistent replicas
// are not deleted when dfdaemon runs garbage collection; they are only
// deleted when the task is deleted by the user.
uint64 current_persistent_replica_count = 3;
// Current number of non-persistent (cache) replicas. Non-persistent replicas
// may be deleted when dfdaemon runs garbage collection.
uint64 current_replica_count = 4;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length, in bytes. Must be at least 1.
uint64 piece_length = 7 [(validate.rules).uint64.gte = 1];
// Task content length, in bytes.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// Task state.
string state = 10 [(validate.rules).string.min_len = 1];
// Task create time.
google.protobuf.Timestamp created_at = 11 [(validate.rules).timestamp.required = true];
// Task update time.
google.protobuf.Timestamp updated_at = 12 [(validate.rules).timestamp.required = true];
}
// Host metadata: a machine participating in the P2P cluster.
message Host {
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
// Host type. Only values 0-3 are accepted; the meaning of each value is
// defined by the scheduler. NOTE(review): an enum would be self-documenting,
// but changing the field type would break the wire contract.
uint32 type = 2 [(validate.rules).uint32.lte = 3];
// Hostname.
string hostname = 3 [(validate.rules).string.min_len = 1];
// Host ip.
string ip = 4 [(validate.rules).string.ip = true];
// Port of the grpc service. Restricted to non-privileged TCP ports; the
// upper bound is inclusive because 65535 is a valid port (previous `lt`
// constraint excluded it by off-by-one).
int32 port = 5 [(validate.rules).int32 = {gte: 1024, lte: 65535}];
// Port of the download server. Same range as `port`.
int32 download_port = 6 [(validate.rules).int32 = {gte: 1024, lte: 65535}];
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
optional CPU cpu = 12;
// Memory Stat.
optional Memory memory = 13;
// Network Stat.
optional Network network = 14;
// Disk Stat.
optional Disk disk = 15;
// Build information of the running daemon.
optional Build build = 16;
// ID of the scheduler cluster to which the host belongs.
uint64 scheduler_cluster_id = 17;
// Disable sharing data with other peers.
bool disable_shared = 18;
}
// CPU Stat: processor counts and utilization of the host.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system.
uint32 physical_count = 2;
// Percentage of cpu used, system-wide.
double percent = 3 [(validate.rules).double.gte = 0];
// Percentage of cpu used by the current process.
double process_percent = 4 [(validate.rules).double.gte = 0];
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
optional CPUTimes times = 5;
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time spent in user mode.
double user = 1 [(validate.rules).double.gte = 0];
// CPU time spent in kernel (system) mode.
double system = 2 [(validate.rules).double.gte = 0];
// CPU time spent idle.
double idle = 3 [(validate.rules).double.gte = 0];
// CPU time spent on low-priority (niced) user processes.
double nice = 4 [(validate.rules).double.gte = 0];
// CPU time spent waiting for I/O to complete.
double iowait = 5 [(validate.rules).double.gte = 0];
// CPU time spent servicing hardware interrupts.
double irq = 6 [(validate.rules).double.gte = 0];
// CPU time spent servicing software interrupts.
double softirq = 7 [(validate.rules).double.gte = 0];
// CPU time stolen by the hypervisor for other guests.
double steal = 8 [(validate.rules).double.gte = 0];
// CPU time spent running a virtual CPU for guest operating systems.
double guest = 9 [(validate.rules).double.gte = 0];
// CPU time spent running a niced guest.
double guest_nice = 10 [(validate.rules).double.gte = 0];
}
// Memory Stat: RAM usage of the host. Sizes are in bytes.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs, in [0, 100].
double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
// Percentage of memory used by the current process, in [0, 100].
double process_used_percent = 5 [(validate.rules).double = {gte: 0, lte: 100}];
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat: connection counts, location and bandwidth of the host.
message Network {
// Count of tcp connections opened whose status is ESTABLISHED.
uint32 tcp_connection_count = 1;
// Count of upload tcp connections opened whose status is ESTABLISHED.
uint32 upload_tcp_connection_count = 2;
// Location path(area|country|province|city|...).
optional string location = 3;
// IDC where the peer host is located.
optional string idc = 4;
// Download rate: received bytes per second.
uint64 download_rate = 5;
// Download rate limit: maximum received bytes per second.
uint64 download_rate_limit = 6;
// Upload rate: transmitted bytes per second.
uint64 upload_rate = 7;
// Upload rate limit: maximum transmitted bytes per second.
uint64 upload_rate_limit = 8;
}
// Disk Stat: space and inode usage of the dragonfly data path. Sizes are in bytes.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of the dragonfly directory, in [0, 100].
double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
// Total amount of inodes on the data path of the dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of the dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of the dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of the dragonfly directory, in [0, 100].
double inodes_used_percent = 8 [(validate.rules).double = {gte: 0, lte: 100}];
// Disk read bandwidth, in bytes per second.
uint64 read_bandwidth = 9;
// Disk write bandwidth, in bytes per second.
uint64 write_bandwidth = 10;
}
// Build information of the running binary.
message Build {
// Git version (tag) the binary was built from.
string git_version = 1;
// Git commit the binary was built from.
optional string git_commit = 2;
// Golang version used for the build.
optional string go_version = 3;
// Rust version used for the build.
optional string rust_version = 4;
// Platform the binary was built for.
optional string platform = 5;
}
// Download information: the full specification of a download request.
message Download {
// Download url.
string url = 1 [(validate.rules).string.uri = true];
// Digest of the task content, for example blake3:xxx or sha256:yyy.
optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// Range is the url range of the request. If the protocol is http, the range
// will be set in the request header. For other protocols, the range
// will be set in this field.
optional Range range = 3;
// Task type.
TaskType type = 4 [(validate.rules).enum.defined_only = true];
// URL tag identifies different tasks for the same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Peer priority.
Priority priority = 7 [(validate.rules).enum.defined_only = true];
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 8;
// Task request headers.
map<string, string> request_header = 9;
// Task piece length, in bytes. If set, must be at least 1.
optional uint64 piece_length = 10 [(validate.rules).uint64 = {gte: 1, ignore_empty: true}];
// File path to which the downloaded content is exported.
optional string output_path = 11 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Download timeout.
optional google.protobuf.Duration timeout = 12;
// Dfdaemon cannot download the task from the source if disable_back_to_source is true.
bool disable_back_to_source = 13;
// The scheduler needs to schedule the task to download from the source if need_back_to_source is true.
bool need_back_to_source = 14;
// certificate_chain is the client certs in DER format for the backend client to download back-to-source.
repeated bytes certificate_chain = 15;
// Prefetch pre-downloads all pieces of the task when the download task request is a range request.
bool prefetch = 16;
// Object storage protocol information.
optional ObjectStorage object_storage = 17;
// HDFS protocol information.
optional HDFS hdfs = 18;
// is_prefetch is the flag to indicate whether this request itself is a prefetch
// request (as opposed to `prefetch`, which asks for prefetching to be triggered).
bool is_prefetch = 19;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 20;
// load_to_cache indicates whether the content downloaded will be stored in the cache storage.
// Cache storage is designed to store downloaded piece content from preheat tasks,
// allowing other peers to access the content from memory instead of disk.
bool load_to_cache = 21;
}
// Object Storage related information (credentials and endpoint for s3/gcs/oss/obs/cos backends).
message ObjectStorage {
// Region is the region of the object storage service.
optional string region = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Endpoint is the endpoint of the object storage service.
optional string endpoint = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Access key used to access the object storage service.
optional string access_key_id = 3 [(validate.rules).string.min_len = 1];
// Access secret used to access the object storage service.
optional string access_key_secret = 4 [(validate.rules).string.min_len = 1];
// Session token used to access the s3 storage service.
optional string session_token = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Local path to the credential file for Google Cloud Storage OAuth2 authentication.
optional string credential_path = 6 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Predefined ACL used for the Google Cloud Storage service.
optional string predefined_acl = 7 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// HDFS related information for downloading from an HDFS backend.
message HDFS {
// Delegation token for the WebHDFS operator.
optional string delegation_token = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// Range represents a byte range of a download (offset + length, in bytes).
message Range {
// Start offset of the range, in bytes.
uint64 start = 1;
// Length of the range, in bytes.
uint64 length = 2;
}
// Piece represents information about one piece of a task's content.
message Piece {
// Piece number (zero-based index within the task).
uint32 number = 1;
// Id of the parent peer the piece was downloaded from, if any.
optional string parent_id = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Piece offset within the task content, in bytes.
uint64 offset = 3;
// Piece length, in bytes.
uint64 length = 4;
// Digest of the piece data, for example blake3:xxx or sha256:yyy.
string digest = 5 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// Piece content. If present, must be non-empty.
optional bytes content = 6 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
// Traffic type the piece was downloaded with.
optional TrafficType traffic_type = 7;
// Time the piece download cost.
google.protobuf.Duration cost = 8 [(validate.rules).duration.required = true];
// Piece create time.
google.protobuf.Timestamp created_at = 9 [(validate.rules).timestamp.required = true];
}