
Commit ef7e925

Merge branch 'XuehaiPan:main' into main

2 parents 984ca6d + 201caef

7 files changed (+21, -25 lines)

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ repos:
       - id: debug-statements
       - id: double-quote-string-fixer
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.0
+    rev: v0.3.2
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
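This bumps the pinned ruff-pre-commit hook from v0.3.0 to v0.3.2. For context (not necessarily how this commit was produced), running pre-commit autoupdate is the usual way to refresh such pinned revisions.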

.readthedocs.yaml

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ version: 2
 
 # Set the version of Python and other tools you might need
 build:
-  os: ubuntu-22.04
+  os: ubuntu-lts-latest
   tools:
     python: "3.8"
   jobs:

nvitop/api/device.py

Lines changed: 2 additions & 2 deletions
@@ -746,7 +746,7 @@ def __init__(
 
     def __repr__(self) -> str:
        """Return a string representation of the device."""
-        return '{}(index={}, name="{}", total_memory={})'.format(
+        return '{}(index={}, name={!r}, total_memory={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.index,
             self.name(),
@@ -2955,7 +2955,7 @@ def __init__(
 
     def __repr__(self) -> str:
        """Return a string representation of the CUDA device."""
-        return '{}(cuda_index={}, nvml_index={}, name="{}", total_memory={})'.format(
+        return '{}(cuda_index={}, nvml_index={}, name="{}", total_memory={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.cuda_index,
             self.index,
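Two things change here: name={!r} lets repr() quote and escape the device name instead of hard-coding double quotes, and the # noqa: UP032 comments stop Ruff's pyupgrade rule UP032 ("use f-string") from flagging the .format() calls that are kept. A minimal sketch of the quoting difference, using a plain string rather than nvitop's actual Device class:

    # Illustrative only; not nvitop's Device class.
    name = 'NVIDIA GeForce RTX 4090'

    quoted_manually = 'Device(index=0, name="{}")'.format(name)
    quoted_by_repr = 'Device(index=0, name={!r})'.format(name)

    print(quoted_manually)  # Device(index=0, name="NVIDIA GeForce RTX 4090")
    print(quoted_by_repr)   # Device(index=0, name='NVIDIA GeForce RTX 4090')

With !r, a name containing quote characters is also escaped correctly instead of producing a misleading repr.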

nvitop/api/libcuda.py

Lines changed: 6 additions & 8 deletions
@@ -259,10 +259,9 @@ def __repr__(self) -> str:
                 )
             if self.value not in CUDAError._errcode_to_name:
                 CUDAError._errcode_to_name[self.value] = cuGetErrorName(self.value)
-            return '{} Code: {} ({}).'.format(
-                CUDAError._errcode_to_string[self.value],
-                CUDAError._errcode_to_name[self.value],
-                self.value,
+            return (
+                f'{CUDAError._errcode_to_string[self.value]} '
+                f'Code: {CUDAError._errcode_to_name[self.value]} ({self.value}).'
             )
         except CUDAError:
             return f'CUDA Error with code {self.value}.'
@@ -316,10 +315,9 @@ def new(cls: type[CUDAError]) -> CUDAError:
         new_error_class = type(class_name, (CUDAError,), {'__new__': gen_new(err_val)})
         new_error_class.__module__ = __name__
         if err_val in CUDAError._errcode_to_string:
-            new_error_class.__doc__ = 'CUDA Error: {} Code: :data:`{}` ({}).'.format(
-                CUDAError._errcode_to_string[err_val],
-                err_name,
-                err_val,
+            new_error_class.__doc__ = (
+                f'CUDA Error: {CUDAError._errcode_to_string[err_val]} '
+                f'Code: :data:`{err_name}` ({err_val}).'
             )
         else:
             new_error_class.__doc__ = f'CUDA Error with code :data:`{err_name}` ({err_val})'
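Here, and in nvitop/api/libcudart.py below, multi-argument .format() calls become implicitly concatenated f-strings, the form Ruff's UP032 rule prefers; the rendered text is unchanged. A small self-contained sketch with toy error tables (stand-ins, not the real CUDA bindings) showing the equivalence:

    # Toy stand-ins for the error-code tables; values are illustrative.
    errcode_to_string = {2: 'Out of memory.'}
    errcode_to_name = {2: 'CUDA_ERROR_OUT_OF_MEMORY'}
    value = 2

    old_style = '{} Code: {} ({}).'.format(
        errcode_to_string[value],
        errcode_to_name[value],
        value,
    )
    new_style = (
        f'{errcode_to_string[value]} '
        f'Code: {errcode_to_name[value]} ({value}).'
    )
    assert old_style == new_style == 'Out of memory. Code: CUDA_ERROR_OUT_OF_MEMORY (2).'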

nvitop/api/libcudart.py

Lines changed: 6 additions & 8 deletions
@@ -307,10 +307,9 @@ def __repr__(self) -> str:
                 )
             if self.value not in cudaError._errcode_to_name:
                 cudaError._errcode_to_name[self.value] = cudaGetErrorName(self.value)
-            return '{} Code: {} ({}).'.format(
-                cudaError._errcode_to_string[self.value],
-                cudaError._errcode_to_name[self.value],
-                self.value,
+            return (
+                f'{cudaError._errcode_to_string[self.value]} '
+                f'Code: {cudaError._errcode_to_name[self.value]} ({self.value}).'
             )
         except cudaError:
             return f'CUDA Error with code {self.value}.'
@@ -367,10 +366,9 @@ def new(cls: type[cudaError]) -> cudaError:
         new_error_class = type(class_name, (cudaError,), {'__new__': gen_new(err_val)})
         new_error_class.__module__ = __name__
         if err_val in cudaError._errcode_to_string:
-            new_error_class.__doc__ = 'cudaError: {} Code: :data:`{}` ({}).'.format(
-                cudaError._errcode_to_string[err_val],
-                err_name,
-                err_val,
+            new_error_class.__doc__ = (
+                f'cudaError: {cudaError._errcode_to_string[err_val]} '
+                f'Code: :data:`{err_name}` ({err_val}).'
             )
         else:
             new_error_class.__doc__ = f'CUDA Error with code :data:`{err_name}` ({err_val})'

nvitop/api/process.py

Lines changed: 1 addition & 1 deletion
@@ -535,7 +535,7 @@ def __init__(
 
     def __repr__(self) -> str:
        """Return a string representation of the GPU process."""
-        return '{}(pid={}, gpu_memory={}, type={}, device={}, host={})'.format(
+        return '{}(pid={}, gpu_memory={}, type={}, device={}, host={})'.format(  # noqa: UP032
             self.__class__.__name__,
             self.pid,
             self.gpu_memory_human(),

nvitop/gui/screens/metrics.py

Lines changed: 4 additions & 4 deletions
@@ -148,31 +148,31 @@ def format_max_cpu_percent(value):
         def format_host_memory(value):
             if value is NA:
                 return f'HOST-MEM: {value}'
-            return 'HOST-MEM: {} ({:.1f}%)'.format(
+            return 'HOST-MEM: {} ({:.1f}%)'.format(  # noqa: UP032
                 bytes2human(value),
                 round(100.0 * value / total_host_memory, 1),
             )
 
         def format_max_host_memory(value):
             if value is NA:
                 return f'MAX HOST-MEM: {value}'
-            return 'MAX HOST-MEM: {} ({:.1f}%) / {}'.format(
+            return 'MAX HOST-MEM: {} ({:.1f}%) / {}'.format(  # noqa: UP032
                 bytes2human(value),
                 round(100.0 * value / total_host_memory, 1),
                 total_host_memory_human,
             )
 
         def format_gpu_memory(value):
             if value is not NA and total_gpu_memory is not NA:
-                return 'GPU-MEM: {} ({:.1f}%)'.format(
+                return 'GPU-MEM: {} ({:.1f}%)'.format(  # noqa: UP032
                     bytes2human(value),
                     round(100.0 * value / total_gpu_memory, 1),
                 )
             return f'GPU-MEM: {value}'
 
         def format_max_gpu_memory(value):
             if value is not NA and total_gpu_memory is not NA:
-                return 'MAX GPU-MEM: {} ({:.1f}%) / {}'.format(
+                return 'MAX GPU-MEM: {} ({:.1f}%) / {}'.format(  # noqa: UP032
                     bytes2human(value),
                     round(100.0 * value / total_gpu_memory, 1),
                     total_gpu_memory_human,
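These GUI formatters keep str.format() and silence Ruff's UP032 rule with # noqa instead of converting to f-strings; the rule is purely stylistic, and both spellings render the same text. A quick sketch with made-up values (bytes2human and the NA sentinel from nvitop are not involved):

    # Made-up values; equivalent output either way.
    value_human = '10.0GiB'
    percent = 42.1951

    with_format = 'HOST-MEM: {} ({:.1f}%)'.format(value_human, percent)  # noqa: UP032
    with_fstring = f'HOST-MEM: {value_human} ({percent:.1f}%)'

    assert with_format == with_fstring == 'HOST-MEM: 10.0GiB (42.2%)'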
