
Commit 650b470

Merge branch 'fedsp' of https://github.com/alibaba/FederatedScope into fedprompt

2 parents f3b7270 + 06e68f5

File tree

15 files changed (+64, -17 lines)

README.md

Lines changed: 2 additions & 2 deletions

@@ -51,14 +51,14 @@ FederatedScope
 │   ├── nlp # Federated learning in NLP
 │   ├── gfl # Graph federated learning
 │   ├── autotune # Auto-tunning for federated learning
-│   ├── vertical_fl # Vartical federated learning
+│   ├── vertical_fl # Vertical federated learning
 │   ├── contrib
 │   ├── main.py
 │   ├── ... ...
 ├── scripts # Scripts for reproducing existing algorithms
 ├── benchmark # We release several benchmarks for convenient and fair comparisons
 ├── doc # For automatic documentation
-├── enviornment # Installation requirements and provided docker files
+├── environment # Installation requirements and provided docker files
 ├── materials # Materials of related topics (e.g., paper lists)
 │   ├── notebook
 │   ├── paper_list

federatedscope/attack/trainer/gaussian_attack_trainer.py

Lines changed: 3 additions & 0 deletions

@@ -31,6 +31,9 @@ def hook_on_batch_backward_generate_gaussian_noise_gradient(ctx):
     ctx.optimizer.zero_grad()
     ctx.loss_task.backward()

+    if ctx.grad_clip > 0:
+        torch.nn.utils.clip_grad_norm_(ctx.model.parameters(), ctx.grad_clip)
+
     grad_values = list()
     for name, param in ctx.model.named_parameters():
         if 'bn' not in name:
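
The added lines clip the gradient norm before the hook collects the per-parameter gradient values. Below is a minimal standalone sketch of the same pattern in plain PyTorch; `model`, `loss`, and `grad_clip` are illustrative stand-ins for `ctx.model`, `ctx.loss_task`, and `ctx.grad_clip`, not FederatedScope's API.

# Minimal sketch of the clipping step added above, outside FederatedScope.
# `grad_clip` mirrors ctx.grad_clip: a value <= 0 disables clipping.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
grad_clip = 1.0

x, y = torch.randn(8, 4), torch.randn(8, 2)
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
if grad_clip > 0:
    # Rescales all gradients in-place so their total L2 norm is <= grad_clip.
    torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)

# Gradients read after this point (as the hook does) are norm-bounded.
grads = [p.grad.clone() for p in model.parameters()]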

federatedscope/core/aggregators/bulyan_aggregator.py

Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ def _aggre_with_bulyan(self, models):
         Apply MultiKrum to select \theta (\theta <= client_num-
         2*self.byzantine_node_num) local models
         '''
-        init_model = self.model.state_dict()
+        _, init_model = models[0]
         global_update = copy.deepcopy(init_model)
         models_para = [each_model[1] for each_model in models]
         krum_scores = self._calculate_score(models_para)
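
This change takes the aggregation template from the first received update instead of the server's full `state_dict()`, so clients that upload only a subset of parameters (e.g. prompt or adapter weights) no longer cause missing-key errors. A minimal sketch of the idea, assuming the `(sample_size, state_dict)` tuple format shown in the diff and using a plain element-wise mean rather than the actual Bulyan rule:

# Sketch: when clients send only a subset of parameters, iterating over the
# full global state_dict would hit keys the clients never uploaded.
import copy
import torch

global_model = {'shared.weight': torch.zeros(2), 'head.weight': torch.zeros(3)}
models = [
    (10, {'shared.weight': torch.ones(2)}),      # client 1: partial update
    (20, {'shared.weight': 2 * torch.ones(2)}),  # client 2: partial update
]

# Old behaviour: template = global_model        -> KeyError on 'head.weight'
# New behaviour: template = first client update -> only uploaded keys aggregated
_, init_model = models[0]
global_update = copy.deepcopy(init_model)
for key in init_model:
    global_update[key] = torch.stack([m[1][key] for m in models]).mean(dim=0)

print(global_update)  # {'shared.weight': tensor([1.5, 1.5])}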

federatedscope/core/aggregators/clients_avg_aggregator.py

Lines changed: 5 additions & 0 deletions

@@ -71,6 +71,9 @@ def _para_weighted_avg(self, models, recover_fun=None):
             for i in range(len(models)):
                 local_sample_size, local_model = models[i]

+                if key not in local_model:
+                    continue
+
                 if self.cfg.federate.ignore_weight:
                     weight = 1.0 / len(models)
                 elif self.cfg.federate.use_ss:
@@ -126,6 +129,8 @@ def inc(self, content):
         if isinstance(content, tuple):
             sample_size, model_params = content
             for key in self.maintained:
+                if key not in model_params:
+                    continue
                 # if model_params[key].device != self.maintained[key].device:
                 #     model_params[key].to(self.maintained[key].device)
                 self.maintained[key] = (self.cnt * self.maintained[key] +
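
Both hunks make the FedAvg-style aggregation tolerant of partial uploads: keys a client did not send are simply skipped. A self-contained sketch of sample-size-weighted averaging with that guard; the `weighted_avg` helper and the toy dicts below are illustrative, not the aggregator's real interface.

# Sketch: FedAvg-style weighted averaging that ignores keys a client did not send.
import torch

def weighted_avg(models):
    """models: list of (sample_size, state_dict) tuples."""
    _, template = models[0]
    avg = {}
    for key in template:
        total, acc = 0, None
        for sample_size, local_model in models:
            if key not in local_model:   # the new guard from the diff
                continue
            total += sample_size
            contrib = local_model[key] * sample_size
            acc = contrib if acc is None else acc + contrib
        avg[key] = acc / total
    return avg

models = [(10, {'w': torch.tensor([1.0])}),
          (30, {'w': torch.tensor([5.0])})]
print(weighted_avg(models))  # {'w': tensor([4.])} = (10*1 + 30*5) / 40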

federatedscope/core/aggregators/median_aggregator.py

Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ def aggregate(self, agg_info):
         return updated_model

     def _aggre_with_median(self, models):
-        init_model = self.model.state_dict()
+        _, init_model = models[0]
         global_update = copy.deepcopy(init_model)
         for key in init_model:
             temp = torch.stack([each_model[1][key] for each_model in models],

federatedscope/core/aggregators/normbounding_aggregator.py

Lines changed: 11 additions & 5 deletions

@@ -35,28 +35,34 @@ def aggregate(self, agg_info):
     def _aggre_with_normbounding(self, models):
         models_temp = []
         for each_model in models:
-            param = self._flatten_updates(each_model[1])
+            param, ignore_keys = self._flatten_updates(each_model[1])
             if torch.norm(param, p=2) > self.norm_bound:
                 scaling_rate = self.norm_bound / torch.norm(param, p=2)
                 scaled_param = scaling_rate * param
                 models_temp.append(
-                    (each_model[0], self._reconstruct_updates(scaled_param)))
+                    (each_model[0],
+                     self._reconstruct_updates(scaled_param, ignore_keys)))
             else:
                 models_temp.append(each_model)
         return self._para_weighted_avg(models_temp)

     def _flatten_updates(self, model):
-        model_update = []
+        model_update, ignore_keys = [], []
         init_model = self.model.state_dict()
         for key in init_model:
+            if key not in model:
+                ignore_keys.append(key)
+                continue
             model_update.append(model[key].view(-1))
-        return torch.cat(model_update, dim=0)
+        return torch.cat(model_update, dim=0), ignore_keys

-    def _reconstruct_updates(self, flatten_updates):
+    def _reconstruct_updates(self, flatten_updates, ignore_keys):
         start_idx = 0
         init_model = self.model.state_dict()
         reconstructed_model = copy.deepcopy(init_model)
         for key in init_model:
+            if key in ignore_keys:
+                continue
             reconstructed_model[key] = flatten_updates[
                 start_idx:start_idx + len(init_model[key].view(-1))].reshape(
                     init_model[key].shape)
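
`_flatten_updates` now also reports which global keys are missing from a client update, and `_reconstruct_updates` leaves those keys untouched when unflattening, so norm bounding works on partial updates as well. A self-contained sketch of the same flatten/scale/reconstruct round trip, with `flatten` and `reconstruct` as hypothetical standalone versions of the two methods:

# Sketch: norm-bound a partial update by flattening only the keys it contains.
import copy
import torch

global_state = {'a': torch.zeros(2), 'b': torch.zeros(3)}
client_update = {'a': torch.tensor([3.0, 4.0])}   # 'b' not uploaded
norm_bound = 1.0

def flatten(update, reference):
    flat, ignore_keys = [], []
    for key in reference:
        if key not in update:
            ignore_keys.append(key)
            continue
        flat.append(update[key].view(-1))
    return torch.cat(flat, dim=0), ignore_keys

def reconstruct(flat, reference, ignore_keys):
    out = copy.deepcopy(reference)
    start = 0
    for key in reference:
        if key in ignore_keys:   # missing keys keep the global values
            continue
        numel = reference[key].numel()
        out[key] = flat[start:start + numel].reshape(reference[key].shape)
        start += numel
    return out

param, ignore_keys = flatten(client_update, global_state)
if torch.norm(param, p=2) > norm_bound:
    param = param * (norm_bound / torch.norm(param, p=2))
bounded = reconstruct(param, global_state, ignore_keys)
print(bounded)  # 'a' scaled to unit norm ([0.6, 0.8]); 'b' left at zeros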

federatedscope/core/aggregators/trimmedmean_aggregator.py

Lines changed: 1 addition & 1 deletion

@@ -42,7 +42,7 @@ def aggregate(self, agg_info):
         return updated_model

     def _aggre_with_trimmedmean(self, models):
-        init_model = self.model.state_dict()
+        _, init_model = models[0]
        global_update = copy.deepcopy(init_model)
         excluded_num = int(len(models) * self.excluded_ratio)
         for key in init_model:

federatedscope/core/auxiliaries/model_builder.py

Lines changed: 5 additions & 1 deletion

@@ -207,4 +207,8 @@ def get_model(model_config, local_data=None, backend='torch', role='client'):


 def get_trainable_para_names(model):
-    return set(dict(list(model.named_parameters())).keys())
+    grad_params = set()
+    for name, param in model.named_parameters():
+        if param.requires_grad:
+            grad_params.add(name)
+    return grad_params
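
The rewritten helper returns only parameters that still require gradients, so frozen weights (e.g. a backbone kept fixed during prompt tuning) are no longer counted as trainable. A small usage sketch with a toy model (the two-layer model below is illustrative):

# Sketch: only names of parameters with requires_grad=True are returned.
import torch

def get_trainable_para_names(model):
    grad_params = set()
    for name, param in model.named_parameters():
        if param.requires_grad:
            grad_params.add(name)
    return grad_params

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
for param in model[0].parameters():   # freeze the first layer
    param.requires_grad = False

print(get_trainable_para_names(model))
# e.g. {'1.weight', '1.bias'}; the old implementation would also list
# '0.weight' and '0.bias' despite them being frozen.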

federatedscope/core/trainers/context.py

Lines changed: 1 addition & 0 deletions

@@ -154,6 +154,7 @@ def __init__(self, model, cfg, data=None, device=None):

         # Setup optimize-related context variable
         if self.cfg.backend == 'torch':
+            # TODO: should we make `self.trainable_para_names` @property?
             self.trainable_para_names = get_trainable_para_names(self.model)
             # TODO: make `criterion` and `regularizer` @property and cached
             # to compare whether changes happen

federatedscope/core/trainers/trainer.py

Lines changed: 2 additions & 2 deletions

@@ -392,11 +392,11 @@ def _param_filter(self, state_dict, filter_keywords=None):

         trainable_filter = lambda p: True if \
             self.cfg.personalization.share_non_trainable_para else \
-            lambda p: p in self.ctx.trainable_para_names
+            p in self.ctx.trainable_para_names
         keyword_filter = filter_by_specified_keywords
         return dict(
             filter(
-                lambda elem: trainable_filter(elem[1]) and keyword_filter(
+                lambda elem: trainable_filter(elem[0]) and keyword_filter(
                     elem[0], filter_keywords), state_dict.items()))

     def save_model(self, path, cur_round=-1):
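
The old filter had two bugs: when `share_non_trainable_para` was off, the conditional expression returned the inner lambda object, which is always truthy, so no parameter was ever filtered out; and the filter was applied to `elem[1]` (the tensor) instead of `elem[0]` (the parameter name). A minimal before/after sketch (the flag value and the parameter names are illustrative):

# Sketch of why the old filter let every parameter through.
share_non_trainable_para = False
trainable_para_names = {'head.weight'}

# Old: when the flag is off, the else-branch *is* a lambda object,
# which is truthy for every input, so nothing is filtered.
old_filter = lambda p: True if share_non_trainable_para else \
    lambda p: p in trainable_para_names
print(bool(old_filter('backbone.weight')))   # True -- wrong

# New: the else-branch evaluates the membership test on the parameter name,
# and the caller now passes elem[0] (the name), not elem[1] (the tensor).
new_filter = lambda p: True if share_non_trainable_para else \
    p in trainable_para_names
print(new_filter('backbone.weight'))          # False
print(new_filter('head.weight'))              # True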
