I'm trying to train a new model myself using the following command:
I ran into the following error:
return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]
Traceback (most recent call last):
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\train.py", line 554, in <module>
    main() # pylint: disable=no-value-for-parameter
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\click\core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\click\core.py", line 1055, in main
    rv = self.invoke(ctx)
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\click\core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\click\core.py", line 760, in invoke
    return __callback(*args, **kwargs)
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\click\decorators.py", line 26, in new_func
    return f(get_current_context(), *args, **kwargs)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\train.py", line 547, in main
    subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\train.py", line 398, in subprocess_fn
    training_loop.training_loop(rank=rank, **args)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\training\training_loop.py", line 284, in training_loop
    loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\training\loss.py", line 79, in accumulate_gradients
    loss_Gmain.mean().mul(gain).backward()
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\torch\_tensor.py", line 363, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\torch\autograd\__init__.py", line 173, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
  File "C:\Users\Dinner\.conda\envs\pytorch\lib\site-packages\torch\autograd\function.py", line 253, in apply
    return user_fn(self, *args)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\torch_utils\ops\conv2d_gradfix.py", line 133, in backward
    grad_weight = Conv2dGradWeight.apply(grad_output, input)
  File "F:\USB Backup\Clemson University\Course\Artificial Intellengent\Project\data-efficient-gans-master\data-efficient-gans-master\DiffAugment-stylegan2-pytorch\torch_utils\ops\conv2d_gradfix.py", line 145, in forward
    op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
RuntimeError: No such operator aten::cudnn_convolution_backward_weight
My CUDA version:
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2021 NVIDIA Corporation
Built on Sun_Mar_21_19:24:09_Pacific_Daylight_Time_2021
Cuda compilation tools, release 11.3, V11.3.58
Build cuda_11.3.r11.3/compiler.29745058_0
PyTorch version:
1.11.0
How should I fix it?
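From what I can tell, this is a PyTorch version mismatch rather than a CUDA problem: PyTorch 1.11 removed the cuDNN-specific operator aten::cudnn_convolution_backward_weight and folded it into the unified aten::convolution_backward, so the code path in torch_utils\ops\conv2d_gradfix.py looks up an operator that no longer exists. Two plausible fixes: pin an older PyTorch (e.g. pip install torch==1.10.2), or patch conv2d_gradfix.py to call the new unified operator on 1.11+, as NVIDIA's newer StyleGAN3 code does. Below is a minimal sketch of the second option; the helper name conv2d_grad_weight and its default arguments are mine for illustration, not the repo's actual API.

import torch

# Sketch: compute the conv2d weight gradient on PyTorch >= 1.11 via the
# unified aten::convolution_backward operator (assumption: this replaces
# the removed aten::cudnn_convolution_backward_weight code path).
def conv2d_grad_weight(grad_output, input, weight, stride=(1, 1),
                       padding=(0, 0), dilation=(1, 1), groups=1):
    # output_mask selects which of (grad_input, grad_weight, grad_bias)
    # to compute; only grad_weight is needed here.
    _, grad_weight, _ = torch.ops.aten.convolution_backward(
        grad_output, input, weight,
        None,                   # bias_sizes: unused, grad_bias is masked off
        list(stride), list(padding), list(dilation),
        False,                  # transposed
        [0, 0],                 # output_padding (only used for transposed conv)
        groups,
        [False, True, False])   # output_mask
    return grad_weight

If hand-patching feels risky, replacing torch_utils\ops\conv2d_gradfix.py with the version from the NVlabs/stylegan3 repository, which branches on the PyTorch version and uses this operator on 1.11+, is probably the least invasive route.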