add use_device() function
can switch to a specific device
Tps-F committed Mar 7, 2024
1 parent bf31c5a commit cf00b3f
Showing 2 changed files with 68 additions and 51 deletions.
1 change: 1 addition & 0 deletions rvc/configs/__init__.py
@@ -0,0 +1 @@
+from rvc.configs.config import Config
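The one-line __init__.py change re-exports Config at the package level, so it can be imported from rvc.configs directly instead of reaching into the rvc.configs.config module. A minimal import sketch:

# After this commit the shorter package-level import also works:
from rvc.configs import Config   # equivalent to: from rvc.configs.config import Config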
118 changes: 67 additions & 51 deletions rvc/configs/config.py
@@ -105,6 +105,69 @@ def has_mps() -> bool:
     def has_xpu() -> bool:
         return hasattr(torch, "xpu") and torch.xpu.is_available()

+    def params_config(self) -> tuple:
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            x_pad = 1
+            x_query = 5
+            x_center = 30
+            x_max = 32
+        elif self.is_half:
+            # 6G GPU_RAM conf
+            x_pad = 3
+            x_query = 10
+            x_center = 60
+            x_max = 65
+        else:
+            # 5G GPU_RAM conf
+            x_pad = 1
+            x_query = 6
+            x_center = 38
+            x_max = 41
+        return x_pad, x_query, x_center, x_max
+
+    def use_cuda(self) -> None:
+        if self.has_xpu():
+            self.device = self.instead = "xpu:0"
+            self.is_half = True
+        i_device = int(self.device.split(":")[-1])
+        self.gpu_name = torch.cuda.get_device_name(i_device)
+        if (
+            ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+            or "P40" in self.gpu_name.upper()
+            or "P10" in self.gpu_name.upper()
+            or "1060" in self.gpu_name
+            or "1070" in self.gpu_name
+            or "1080" in self.gpu_name
+        ):
+            logger.info(f"Found GPU {self.gpu_name}, force to fp32")
+            self.is_half = False
+            self.use_fp32_config()
+        else:
+            logger.info(f"Found GPU {self.gpu_name}")
+        self.gpu_mem = int(
+            torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024
+            + 0.4
+        )
+
+    def use_mps(self) -> None:
+        self.device = self.instead = "mps"
+        self.is_half = False
+        self.use_fp32_config()
+        self.params_config()
+
+    def use_dml(self) -> None:
+        import torch_directml
+
+        self.device = torch_directml.device(torch_directml.default_device())
+        self.is_half = False
+        self.params_config()
+
+    def use_cpu(self) -> None:
+        self.device = self.instead = "cpu"
+        self.is_half = False
+        self.use_fp32_config()
+        self.params_config()
+
     def use_fp32_config(self) -> None:
         for config_file, data in self.json_config.items():
             try:
@@ -117,65 +180,18 @@ def use_fp32_config(self) -> None:

     def device_config(self) -> tuple:
         if torch.cuda.is_available():
-            if self.has_xpu():
-                self.device = self.instead = "xpu:0"
-                self.is_half = True
-            i_device = int(self.device.split(":")[-1])
-            self.gpu_name = torch.cuda.get_device_name(i_device)
-            if (
-                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
-                or "P40" in self.gpu_name.upper()
-                or "P10" in self.gpu_name.upper()
-                or "1060" in self.gpu_name
-                or "1070" in self.gpu_name
-                or "1080" in self.gpu_name
-            ):
-                logger.info(f"Found GPU {self.gpu_name}, force to fp32")
-                self.is_half = False
-                self.use_fp32_config()
-            else:
-                logger.info(f"Found GPU {self.gpu_name}")
-            self.gpu_mem = int(
-                torch.cuda.get_device_properties(i_device).total_memory
-                / 1024
-                / 1024
-                / 1024
-                + 0.4
-            )
+            self.use_cuda()
         elif self.has_mps():
-            logger.info("No supported Nvidia GPU found")
-            self.device = self.instead = "mps"
-            self.is_half = False
-            self.use_fp32_config()
+            self.use_mps()
         elif self.dml:
-            import torch_directml
-
-            self.device = torch_directml.device(torch_directml.default_device())
-            self.is_half = False
+            self.use_dml()
         else:
             logger.info("No supported Nvidia GPU found")
             self.device = self.instead = "cpu"
             self.is_half = False
             self.use_fp32_config()

-        if self.gpu_mem is not None and self.gpu_mem <= 4:
-            x_pad = 1
-            x_query = 5
-            x_center = 30
-            x_max = 32
-        elif self.is_half:
-            # 6G GPU_RAM conf
-            x_pad = 3
-            x_query = 10
-            x_center = 60
-            x_max = 65
-        else:
-            # 5G GPU_RAM conf
-            x_pad = 1
-            x_query = 6
-            x_center = 38
-            x_max = 41

         logger.info(f"Use {self.dml or self.instead} instead")
         logger.info(f"is_half:{self.is_half}, device:{self.device}")
-        return x_pad, x_query, x_center, x_max
+        return self.params_config()
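Taken together, the commit extracts the device-specific branches of device_config() into separate helpers (use_cuda(), use_mps(), use_dml(), use_cpu()) and moves the x_pad/x_query/x_center/x_max selection into params_config(), so a caller can keep the automatic detection or switch to a specific device explicitly, as the commit message says. A rough usage sketch, assuming Config can be constructed with no arguments (its __init__ is not part of this diff):

from rvc.configs import Config

config = Config()  # assumption: no-argument constructor, not shown in this diff

# Automatic detection, same order as device_config(): CUDA, then MPS, then DirectML, then CPU.
x_pad, x_query, x_center, x_max = config.device_config()

# Or force a specific backend with the new helpers:
config.use_cpu()      # plain CPU, fp32
# config.use_mps()    # Apple Silicon (MPS), fp32
# config.use_dml()    # DirectML; requires the torch_directml package

# x_pad/x_query/x_center/x_max are then picked from gpu_mem and is_half, as in params_config().
x_pad, x_query, x_center, x_max = config.params_config()

Note that use_cuda() reads self.device to pick the CUDA/XPU index and query the GPU name and memory, so it is normally reached through device_config() rather than called directly in this sketch.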
