# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Make vendored dependencies importable before anything under nnutils loads.
sys.path.insert(0, 'third_party')

import torch
import torch.backends.cudnn as cudnn
from absl import app
from absl import flags

from nnutils.train_utils import LASRTrainer

# Let cuDNN auto-tune convolution algorithms for fixed input sizes.
cudnn.benchmark = True

flags.DEFINE_integer('local_rank', 0, 'for distributed training')
flags.DEFINE_integer('ngpu', 1, 'number of gpus to use')
flags.DEFINE_boolean('use_gtpose', True,
                     'if true, uses gt pose for projection, but the camera still gets trained')
flags.DEFINE_string('sil_path', 'none', 'additional silhouette path')
opts = flags.FLAGS

def main(_):
    # Bind this process to its GPU before initializing the process group.
    torch.cuda.set_device(opts.local_rank)
    world_size = opts.ngpu
    # init_method='env://' reads MASTER_ADDR, MASTER_PORT, etc. from the
    # environment, as set up by the distributed launcher.
    torch.distributed.init_process_group(
        'nccl',
        init_method='env://',
        world_size=world_size,
        rank=opts.local_rank,
    )
    print('%d/%d' % (opts.local_rank, world_size))

    # Seed CPU and GPU RNGs for reproducibility.
    torch.manual_seed(0)
    torch.cuda.manual_seed(1)

    trainer = LASRTrainer(opts)
    trainer.init_training()
    trainer.train()

if __name__ == '__main__':
app.run(main)
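
# Launch sketch (an assumption; the repo's own wrapper scripts may differ).
# Because init_process_group uses init_method='env://', torch.distributed
# expects MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE in the environment;
# torch.distributed.launch sets them and passes --local_rank to each worker,
# which the flag defined above picks up. A single-node, single-GPU run might
# look like:
#
#   python -m torch.distributed.launch --nproc_per_node=1 optimize.py --ngpu 1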