Skip to content

Commit fe76611

Browse files
committed
denoising testing code
1 parent 51bfb6c commit fe76611

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

76 files changed

+122617
-0
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
.*/
22
.DS*
3+
*.pyc

Denoising/Datasets/README.md

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
For training and testing, your directory structure should look like this
2+
3+
4+
`Datasets` <br/>
5+
`├──train` <br/>
6+
     `├──DFWB` <br/>
7+
     `└──SIDD` <br/>
8+
          `├──input_crops` <br/>
9+
          `└──target_crops` <br/>
10+
`├──val` <br/>
11+
     `└──SIDD` <br/>
12+
          `├──input_crops` <br/>
13+
          `└──target_crops` <br/>
14+
`└──test` <br/>
15+
     `├──BSD68` <br/>
16+
     `├──CBSD68` <br/>
17+
     `├──Kodak` <br/>
18+
     `├──McMaster` <br/>
19+
     `├──Set12` <br/>
20+
     `├──Urban100` <br/>
21+
     `├──SIDD` <br/>
22+
          `├──ValidationNoisyBlocksSrgb.mat` <br/>
23+
          `└──ValidationGtBlocksSrgb.mat` <br/>
24+
     `└──DND` <br/>
25+
          `├──info.mat` <br/>
26+
          `└──images_srgb` <br/>
27+
               `├──0001.mat` <br/>
28+
               `├──0002.mat` <br/>
29+
               `├── ... ` <br/>
30+
               `└──0050.mat`

Denoising/Options/gau_color_15.yml

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
# general settings
2+
name: gaum-kb333
3+
model_type: ImageCleanModel
4+
scale: 1
5+
num_gpu: 8 # set num_gpu: 0 for cpu mode
6+
manual_seed: 100
7+
8+
# dataset and data loader settings
9+
datasets:
10+
train:
11+
name: TrainSet
12+
type: Dataset_GaussianDenoising
13+
sigma_type: random
14+
sigma_range: 15
15+
in_ch: 3 ## RGB image
16+
dataroot_gt: s3://zhangyi/images/val
17+
dataroot_lq: none
18+
geometric_augs: true
19+
20+
filename_tmpl: '{}'
21+
io_backend:
22+
type: petrel
23+
24+
# data loader
25+
use_shuffle: true
26+
num_worker_per_gpu: 8
27+
batch_size_per_gpu: 8
28+
29+
### -------------Progressive training--------------------------
30+
mini_batch_sizes: [8,5,3,2,1,1] # Batch size per gpu
31+
iters: [92000,64000,48000,36000,36000,24000]
32+
gt_size: 384 # Max patch size for progressive training
33+
gt_sizes: [128,160,192,256,320,384] # Patch sizes for progressive training.
34+
### ------------------------------------------------------------
35+
36+
### ------- Training on single fixed-patch size 128x128---------
37+
# mini_batch_sizes: [8]
38+
# iters: [300000]
39+
# gt_size: 128
40+
# gt_sizes: [128]
41+
### ------------------------------------------------------------
42+
43+
dataset_enlarge_ratio: 1
44+
prefetch_mode: ~
45+
46+
val:
47+
name: ValSet
48+
type: Dataset_GaussianDenoising
49+
sigma_test: 25
50+
in_ch: 3 ## RGB image
51+
dataroot_gt: s3://zhangyi/restormer-train/Denoising/Datasets/test/CBSD68
52+
dataroot_lq: none
53+
io_backend:
54+
type: petrel
55+
56+
# network structures
57+
network_g:
58+
type: KBNet_s
59+
60+
# path
61+
path:
62+
pretrain_network_g: pretrained_models/gau_color_15.pth
63+
strict_load_g: true
64+
resume_state: ~
65+
66+
# training settings
67+
train:
68+
total_iter: 300000
69+
warmup_iter: -1 # no warm up
70+
use_grad_clip: true
71+
72+
# Split 300k iterations into two cycles.
73+
# 1st cycle: fixed 3e-4 LR for 92k iters.
74+
# 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
75+
scheduler:
76+
type: CosineAnnealingRestartCyclicLR
77+
periods: [92000, 208000]
78+
restart_weights: [1,1]
79+
eta_mins: [0.0003,0.000001]
80+
81+
mixing_augs:
82+
mixup: true
83+
mixup_beta: 1.2
84+
use_identity: true
85+
86+
optim_g:
87+
type: AdamW
88+
lr: !!float 3e-4
89+
weight_decay: !!float 1e-4
90+
betas: [0.9, 0.999]
91+
92+
# losses
93+
pixel_opt:
94+
type: L1Loss
95+
loss_weight: 1
96+
reduction: mean
97+
98+
# validation settings
99+
val:
100+
window_size: 8
101+
val_freq: !!float 4e3
102+
save_img: false
103+
rgb2bgr: true
104+
use_image: false
105+
max_minibatch: 8
106+
107+
metrics:
108+
psnr: # metric name, can be arbitrary
109+
type: calculate_psnr
110+
crop_border: 0
111+
test_y_channel: false
112+
113+
# logging settings
114+
logger:
115+
print_freq: 1000
116+
save_checkpoint_freq: !!float 1e3
117+
use_tb_logger: true
118+
wandb:
119+
project: ~
120+
resume_id: ~
121+
122+
# dist training settings
123+
dist_params:
124+
backend: nccl
125+
port: 29500

Denoising/Options/gau_color_25.yml

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
# general settings
# Gaussian color denoising at noise level sigma = 25 (KBNet_s backbone).
name: gaum-kb333
model_type: ImageCleanModel
scale: 1
num_gpu: 8  # set num_gpu: 0 for cpu mode
manual_seed: 100

# dataset and data loader settings
datasets:
  train:
    name: TrainSet
    type: Dataset_GaussianDenoising
    sigma_type: random
    sigma_range: 25
    in_ch: 3  ## RGB image
    dataroot_gt: s3://zhangyi/images/val
    dataroot_lq: none
    geometric_augs: true

    filename_tmpl: '{}'
    io_backend:
      type: petrel

    # data loader
    use_shuffle: true
    num_worker_per_gpu: 8
    batch_size_per_gpu: 8

    ### -------------Progressive training--------------------------
    mini_batch_sizes: [8, 5, 3, 2, 1, 1]  # Batch size per gpu
    iters: [92000, 64000, 48000, 36000, 36000, 24000]
    gt_size: 384  # Max patch size for progressive training
    gt_sizes: [128, 160, 192, 256, 320, 384]  # Patch sizes for progressive training.
    ### ------------------------------------------------------------

    ### ------- Training on single fixed-patch size 128x128---------
    # mini_batch_sizes: [8]
    # iters: [300000]
    # gt_size: 128
    # gt_sizes: [128]
    ### ------------------------------------------------------------

    dataset_enlarge_ratio: 1
    prefetch_mode: ~

  val:
    name: ValSet
    type: Dataset_GaussianDenoising
    sigma_test: 25  # validation noise level matches sigma_range above
    in_ch: 3  ## RGB image
    dataroot_gt: s3://zhangyi/restormer-train/Denoising/Datasets/test/CBSD68
    dataroot_lq: none
    io_backend:
      type: petrel

# network structures
network_g:
  type: KBNet_s

# path
path:
  pretrain_network_g: pretrained_models/gau_color_25.pth
  strict_load_g: true
  resume_state: ~

# training settings
train:
  total_iter: 300000
  warmup_iter: -1  # no warm up
  use_grad_clip: true

  # Split 300k iterations into two cycles.
  # 1st cycle: fixed 3e-4 LR for 92k iters.
  # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
  scheduler:
    type: CosineAnnealingRestartCyclicLR
    periods: [92000, 208000]
    restart_weights: [1, 1]
    eta_mins: [0.0003, 0.000001]

  mixing_augs:
    mixup: true
    mixup_beta: 1.2
    use_identity: true

  optim_g:
    type: AdamW
    lr: !!float 3e-4
    weight_decay: !!float 1e-4
    betas: [0.9, 0.999]

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1
    reduction: mean

# validation settings
val:
  window_size: 8
  val_freq: !!float 4e3
  save_img: false
  rgb2bgr: true
  use_image: false
  max_minibatch: 8

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 1000
  save_checkpoint_freq: !!float 1e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

0 commit comments

Comments
 (0)