server.py
'''
License: Apache
Original Authors: Flower
https://github.com/adap/flower
Modified by: Trevor Tomlin
'''
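# Server-side half of a hybrid federated pipeline: each client trains LightGBM
# trees, the server aggregates those trees and trains a CNN on tree-encoded
# features, so global "parameters" travel as a (CNN weights, trees) pair.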
import timeit
from logging import DEBUG, INFO
from typing import Dict, List, Optional, Tuple, Union

import flwr as fl
import lightgbm as lgb
from torch.utils.data import DataLoader

from flwr.common import GetParametersIns, Parameters, Scalar, parameters_to_ndarrays
from flwr.common.logger import log
from flwr.server.client_manager import ClientManager
from flwr.server.history import History
from flwr.server.server import (
    EvaluateResultsAndFailures,
    FitResultsAndFailures,
    evaluate_clients,
    fit_clients,
)
from flwr.server.strategy import Strategy

from cnn import CNN
from config import *
from traintest import test
from tree import tree_encoding_loader
def serverside_eval(
    server_round: int,
    parameters: Tuple[
        Parameters,
        Union[Tuple[lgb.LGBMClassifier, int], List[Tuple[lgb.LGBMClassifier, int]]],
    ],
    config: Dict[str, Scalar],
    testloader: DataLoader,
    batch_size: int,
    client_tree_num: int,
    client_num: int,
) -> Tuple[float, Dict[str, float]]:
"""An evaluation function for centralized/serverside evaluation over the entire test set."""
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = "cpu"
model = CNN()
# print_model_layers(model)
model.set_weights(parameters_to_ndarrays(parameters[0]))
model.to(device)
trees_aggregated = parameters[1]
testloader = tree_encoding_loader(testloader, batch_size, trees_aggregated, client_tree_num, client_num)
loss, result, _ = test(model, testloader, device=device, log_progress=False)
print(f"Evaluation on the server: test_loss={loss:.4f}, test_accuracy={result:.4f}")
return loss, {"accuracy": result}
class FL_Server(fl.server.Server):
"""Flower server."""
    def __init__(
        self, *, client_manager: ClientManager, strategy: Strategy
    ) -> None:
        # Unlike the stock flwr Server, there is no FedAvg fallback: this
        # server only works with a strategy that aggregates the (CNN, trees)
        # pair, so a strategy is required.
        self._client_manager: ClientManager = client_manager
        self.parameters: Parameters = Parameters(
            tensors=[], tensor_type="numpy.ndarray"
        )
        self.strategy: Strategy = strategy
        self.max_workers: Optional[int] = None
# pylint: disable=too-many-locals
def fit(self, num_rounds: int, timeout: Optional[float]) -> History:
"""Run federated averaging for a number of rounds."""
history = History()
# Initialize parameters
log(INFO, "Initializing global parameters")
self.parameters = self._get_initial_parameters(timeout=timeout)
log(INFO, "Evaluating initial parameters")
res = self.strategy.evaluate(0, parameters=self.parameters)
if res is not None:
log(
INFO,
"initial parameters (loss, other metrics): %s, %s",
res[0],
res[1],
)
history.add_loss_centralized(server_round=0, loss=res[0])
history.add_metrics_centralized(server_round=0, metrics=res[1])
# Run federated learning for num_rounds
log(INFO, "FL starting")
start_time = timeit.default_timer()
for current_round in range(1, num_rounds + 1):
# Train model and replace previous global model
res_fit = self.fit_round(server_round=current_round, timeout=timeout)
if res_fit:
parameters_prime, _, _ = res_fit # fit_metrics_aggregated
if parameters_prime:
self.parameters = parameters_prime
# Evaluate model using strategy implementation
res_cen = self.strategy.evaluate(current_round, parameters=self.parameters)
if res_cen is not None:
loss_cen, metrics_cen = res_cen
log(
INFO,
"fit progress: (%s, %s, %s, %s)",
current_round,
loss_cen,
metrics_cen,
timeit.default_timer() - start_time,
)
history.add_loss_centralized(server_round=current_round, loss=loss_cen)
history.add_metrics_centralized(
server_round=current_round, metrics=metrics_cen
)
# Evaluate model on a sample of available clients
res_fed = self.evaluate_round(server_round=current_round, timeout=timeout)
if res_fed:
loss_fed, evaluate_metrics_fed, _ = res_fed
if loss_fed:
history.add_loss_distributed(
server_round=current_round, loss=loss_fed
)
history.add_metrics_distributed(
server_round=current_round, metrics=evaluate_metrics_fed
)
# Bookkeeping
end_time = timeit.default_timer()
elapsed = end_time - start_time
log(INFO, "FL finished in %s", elapsed)
return history
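
    # For reference: results can be read back from the returned History, e.g.
    # `history.losses_centralized` is a list of (round, loss) pairs and
    # `history.metrics_centralized` maps metric names to (round, value) lists.
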
def evaluate_round(
self,
server_round: int,
timeout: Optional[float],
) -> Optional[
Tuple[Optional[float], Dict[str, Scalar], EvaluateResultsAndFailures]
]:
"""Validate current global model on a number of clients."""
# Get clients and their respective instructions from strategy
client_instructions = self.strategy.configure_evaluate(
server_round=server_round,
parameters=self.parameters,
client_manager=self._client_manager,
)
if not client_instructions:
log(INFO, "evaluate_round %s: no clients selected, cancel", server_round)
return None
log(
DEBUG,
"evaluate_round %s: strategy sampled %s clients (out of %s)",
server_round,
len(client_instructions),
self._client_manager.num_available(),
)
# Collect `evaluate` results from all clients participating in this round
results, failures = evaluate_clients(
client_instructions,
max_workers=self.max_workers,
timeout=timeout,
)
log(
DEBUG,
"evaluate_round %s received %s results and %s failures",
server_round,
len(results),
len(failures),
)
# Aggregate the evaluation results
aggregated_result: Tuple[
Optional[float],
Dict[str, Scalar],
] = self.strategy.aggregate_evaluate(server_round, results, failures)
loss_aggregated, metrics_aggregated = aggregated_result
return loss_aggregated, metrics_aggregated, (results, failures)
def fit_round(
self,
server_round: int,
timeout: Optional[float],
) -> Optional[
Tuple[Optional[Tuple[Parameters, Union[Tuple[lgb.LGBMClassifier, int], List[Tuple[lgb.LGBMClassifier, int]]]]],
Dict[str, Scalar],
FitResultsAndFailures]
]:
"""Perform a single round of federated averaging."""
# Get clients and their respective instructions from strategy
client_instructions = self.strategy.configure_fit(
server_round=server_round,
parameters=self.parameters,
client_manager=self._client_manager,
)
if not client_instructions:
log(INFO, "fit_round %s: no clients selected, cancel", server_round)
return None
log(
DEBUG,
"fit_round %s: strategy sampled %s clients (out of %s)",
server_round,
len(client_instructions),
self._client_manager.num_available(),
)
# Collect `fit` results from all clients participating in this round
results, failures = fit_clients(
client_instructions=client_instructions,
max_workers=self.max_workers,
timeout=timeout,
)
log(
DEBUG,
"fit_round %s received %s results and %s failures",
server_round,
len(results),
len(failures),
)
# Aggregate training results
NN_aggregated: Parameters
trees_aggregated: Union[Tuple[lgb.LGBMClassifier, int], List[Tuple[lgb.LGBMClassifier, int]]]
metrics_aggregated: Dict[str, Scalar]
        aggregated, metrics_aggregated = self.strategy.aggregate_fit(
            server_round, results, failures
        )
        NN_aggregated, trees_aggregated = aggregated[0], aggregated[1]
        if isinstance(trees_aggregated, list):
            print("Server side aggregated", len(trees_aggregated), "trees.")
        else:
            print("Server side did not aggregate trees.")
        return (NN_aggregated, trees_aggregated), metrics_aggregated, (results, failures)
def _get_initial_parameters(self, timeout: Optional[float]) -> Tuple[Parameters, Tuple[lgb.LGBMClassifier, int]]:
"""Get initial parameters from one of the available clients."""
# Server-side parameter initialization
parameters: Optional[Parameters] = self.strategy.initialize_parameters(
client_manager=self._client_manager
)
if parameters is not None:
log(INFO, "Using initial parameters provided by strategy")
return parameters
# Get initial parameters from one of the clients
log(INFO, "Requesting initial parameters from one random client")
random_client = self._client_manager.sample(1)[0]
ins = GetParametersIns(config={})
get_parameters_res_tree = random_client.get_parameters(ins=ins, timeout=timeout)
        parameters = (get_parameters_res_tree[0].parameters, get_parameters_res_tree[1])
log(INFO, "Received initial parameters from one random client")
return parameters
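

if __name__ == "__main__":
    # Minimal usage sketch, with assumptions flagged: `FedXgbNnAvg` stands in
    # for a strategy whose `aggregate_fit` returns the (CNN parameters,
    # aggregated trees) pair this server expects; it is NOT defined in this
    # file (the original Flower example ships one).
    from flwr.server.client_manager import SimpleClientManager
    from strategy import FedXgbNnAvg  # assumed local module, for illustration

    server = FL_Server(
        client_manager=SimpleClientManager(),
        strategy=FedXgbNnAvg(),
    )
    # Serve over gRPC so Flower clients can connect and run the FL rounds.
    fl.server.start_server(
        server_address="0.0.0.0:8080",
        server=server,
        config=fl.server.ServerConfig(num_rounds=5),
    )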