-
Notifications
You must be signed in to change notification settings - Fork 134
/
Copy pathschedulingpb.proto
197 lines (166 loc) · 5.99 KB
/
schedulingpb.proto
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
syntax = "proto3";
package schedulingpb;
import "pdpb.proto";
import "gogoproto/gogo.proto";
import "rustproto.proto";
import "metapb.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (rustproto.lite_runtime_all) = true;
option java_package = "org.tikv.kvproto";
// Scheduling is the RPC surface of the scheduling server. Request/response
// payloads reuse pdpb and metapb types for stats, regions, and operators.
service Scheduling {
// StoreHeartbeat reports pdpb.StoreStats for a store and returns the
// cluster version (see StoreHeartbeatResponse).
rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
// RegionHeartbeat is a bidirectional stream: leaders report per-region
// state and receive scheduling directives (ChangePeer, TransferLeader,
// Merge, SplitRegion, ...) in the responses.
rpc RegionHeartbeat(stream RegionHeartbeatRequest) returns (stream RegionHeartbeatResponse) {}
// SplitRegions asks the scheduler to split regions at the given keys.
rpc SplitRegions(SplitRegionsRequest) returns (SplitRegionsResponse) {}
// ScatterRegions asks the scheduler to scatter the given regions
// (optionally as a named group) across the cluster.
rpc ScatterRegions(ScatterRegionsRequest) returns (ScatterRegionsResponse) {}
// GetOperator queries the status of the operator running on a region.
rpc GetOperator(GetOperatorRequest) returns (GetOperatorResponse) {}
// AskBatchSplit requests new IDs for splitting one region into
// split_count parts.
rpc AskBatchSplit(AskBatchSplitRequest) returns (AskBatchSplitResponse) {}
}
// RequestHeader is carried by every request to identify the target
// cluster and the sending server.
message RequestHeader {
// cluster_id is the ID of the cluster the request is sent to.
uint64 cluster_id = 1;
// sender_id is the ID of the sender server.
uint64 sender_id = 2;
}
// ResponseHeader is carried by every response; error is unset (or
// ErrorType OK) on success.
message ResponseHeader {
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 1;
// error describes the failure, if any, of the corresponding request.
Error error = 2;
}
// ErrorType enumerates the failure categories reported in Error.
// NOTE(review): values are unprefixed and 0 carries business meaning (OK),
// contrary to current proto3 style (PREFIX_UNSPECIFIED = 0); this matches
// the existing pdpb convention and is frozen by the wire/codegen contract,
// so it must not be "fixed" by renaming or renumbering.
enum ErrorType {
// The request succeeded.
OK = 0;
// An unclassified error occurred.
UNKNOWN = 1;
// The cluster has not been bootstrapped yet.
NOT_BOOTSTRAPPED = 2;
// The cluster has already been bootstrapped.
ALREADY_BOOTSTRAPPED = 3;
// A value in the request was invalid.
INVALID_VALUE = 4;
// The request's cluster_id does not match this cluster.
CLUSTER_MISMATCHED = 5;
}
// Error pairs a machine-readable ErrorType with a human-readable message.
message Error {
// type is the category of the error.
ErrorType type = 1;
// message is a human-readable description of the error.
string message = 2;
}
// Participant describes one member of the scheduling service.
message Participant {
// name is the unique name of the scheduling participant.
string name = 1;
// id is the unique id of the scheduling participant.
uint64 id = 2;
// listen_urls is the service endpoint list in the url format.
// listen_urls[0] is primary service endpoint.
repeated string listen_urls = 3;
}
// StoreHeartbeatRequest reports the statistics of a single store.
message StoreHeartbeatRequest {
RequestHeader header = 1;
// stats carries the store-level statistics (see pdpb.StoreStats).
pdpb.StoreStats stats = 2;
}
// StoreHeartbeatResponse acknowledges a store heartbeat.
message StoreHeartbeatResponse {
ResponseHeader header = 1;
// cluster_version is the current version of the cluster.
string cluster_version = 2;
}
// RegionHeartbeatRequest is sent by a region's leader to report the
// region's current state and traffic statistics.
message RegionHeartbeatRequest {
RequestHeader header = 1;
// The region being reported, including its epoch and peer list.
metapb.Region region = 2;
// Leader Peer sending the heartbeat.
metapb.Peer leader = 3;
// Term is the term of raft group.
uint64 term = 4;
// Leader considers that these peers are down.
repeated pdpb.PeerStats down_peers = 5;
// Pending peers are the peers that the leader can't consider as
// working followers.
repeated metapb.Peer pending_peers = 6;
// Bytes read/written during this period.
uint64 bytes_written = 7;
uint64 bytes_read = 8;
// Keys read/written during this period.
uint64 keys_written = 9;
uint64 keys_read = 10;
// Approximate region size.
// NOTE(review): unit not stated here — presumably bytes or MiB as defined
// by the reporter; confirm against the TiKV/pdpb producer.
uint64 approximate_size = 11;
// Approximate number of keys.
uint64 approximate_keys = 12;
// QueryStats reported write query stats, and there are read query stats in store heartbeat
pdpb.QueryStats query_stats = 13;
// Actually reported time interval
pdpb.TimeInterval interval = 14;
}
// RegionHeartbeatResponse carries the scheduling directive (if any) that
// the leader should execute for the reported region. At most one of the
// operation fields (change_peer, transfer_leader, merge, split_region,
// change_peer_v2, switch_witnesses) is expected to be acted on per
// response.
message RegionHeartbeatResponse {
ResponseHeader header = 1;
// ID of the region
uint64 region_id = 2;
// Epoch of the region as known to the scheduler.
metapb.RegionEpoch region_epoch = 3;
// Leader of the region at the moment of the corresponding request was made.
metapb.Peer target_peer = 4;
// Notice, Pd only allows handling reported epoch >= current pd's.
// Leader peer reports region status with RegionHeartbeatRequest
// to pd regularly, pd will determine whether this region
// should do ChangePeer or not.
// E.g., max peer number is 3, region A, first only peer 1 in A.
// 1. Pd region state -> Peers (1), ConfVer (1).
// 2. Leader peer 1 reports region state to pd, pd finds the
// peer number is < 3, so first changes its current region
// state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2.
// 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2),
// pd updates its state -> Peers (1, 2), ConfVer (2).
// 4. Leader may report old Peers (1), ConfVer (1) to pd before ConfChange
// finished, pd still responds with ChangePeer Adding 2; of course, we must
// guarantee the second ChangePeer can't be applied in TiKV.
pdpb.ChangePeer change_peer = 5;
// Pd can return transfer_leader to let TiKV do the leader transfer itself.
pdpb.TransferLeader transfer_leader = 6;
// merge directs the region to merge with its target (see pdpb.Merge).
pdpb.Merge merge = 7;
// PD sends split_region to let TiKV split a region into two regions.
pdpb.SplitRegion split_region = 8;
// Multiple change peer operations atomically.
// Note: PD can use both ChangePeer and ChangePeerV2 at the same time
// (not in the same RegionHeartbeatResponse).
// Now, PD uses ChangePeerV2 in the following scenarios:
// 1. replacing peers
// 2. demoting voter directly
pdpb.ChangePeerV2 change_peer_v2 = 9;
// Batch of witness-switch commands for the region's peers.
pdpb.BatchSwitchWitness switch_witnesses = 10;
}
// ScatterRegionsRequest asks the scheduler to scatter regions across
// stores, either by group name or by an explicit region-ID list.
message ScatterRegionsRequest {
RequestHeader header = 1;
// If group is defined, the regions with the same group would be scattered as a whole group.
// If not defined, the regions would be scattered in a cluster level.
string group = 2;
// If regions_id is defined, the region_id would be ignored.
repeated uint64 regions_id = 3;
// Maximum number of retries for the scatter operation.
uint64 retry_limit = 4;
// If true, store limits are not enforced while scattering.
bool skip_store_limit = 5;
}
// ScatterRegionsResponse reports how much of the scatter has completed.
message ScatterRegionsResponse {
ResponseHeader header = 1;
// Percentage (0-100) of the requested regions already scattered.
uint64 finished_percentage = 2;
}
// SplitRegionsRequest asks the scheduler to split regions at the given keys.
message SplitRegionsRequest {
RequestHeader header = 1;
// Keys at which the covering regions should be split.
repeated bytes split_keys = 2;
// Maximum number of retries for the split operation.
uint64 retry_limit = 3;
}
// SplitRegionsResponse reports the progress and results of a split request.
message SplitRegionsResponse {
ResponseHeader header = 1;
// Percentage (0-100) of the requested splits already finished.
uint64 finished_percentage = 2;
// IDs of the regions produced by the splits.
repeated uint64 regions_id = 3;
}
// GetOperatorRequest queries the operator currently attached to a region.
message GetOperatorRequest {
RequestHeader header = 1;
// ID of the region whose operator is being queried.
uint64 region_id = 2;
}
// GetOperatorResponse describes the operator (if any) on the queried region.
message GetOperatorResponse {
ResponseHeader header = 1;
// ID of the region the operator applies to.
uint64 region_id = 2;
// Human-readable description of the operator.
bytes desc = 3;
// Current status of the operator (see pdpb.OperatorStatus).
pdpb.OperatorStatus status = 4;
// Kind/category of the operator.
bytes kind = 5;
}
// AskBatchSplitRequest asks for the IDs needed to split one region into
// split_count new regions.
message AskBatchSplitRequest {
RequestHeader header = 1;
// The region to be split.
metapb.Region region = 2;
// Number of regions the source region should be split into.
uint32 split_count = 3;
}
// AskBatchSplitResponse returns the allocated IDs for the batch split.
message AskBatchSplitResponse {
ResponseHeader header = 1;
// Allocated region/peer IDs for each new region (see pdpb.SplitID).
repeated pdpb.SplitID ids = 2;
}