[Misc] Remove deprecated names #10817

Merged 3 commits on Dec 3, 2024
Changes from 1 commit
8 changes: 6 additions & 2 deletions vllm/engine/async_llm_engine.py
@@ -6,6 +6,8 @@
                     List, Mapping, Optional, Set, Tuple, Type, Union, overload)
 from weakref import ReferenceType
 
+from typing_extensions import deprecated
+
 import vllm.envs as envs
 from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig,
                          ParallelConfig, SchedulerConfig, VllmConfig)
@@ -422,7 +424,8 @@ async def get_tokenizer_async(self,
         return await (
             self.get_tokenizer_group().get_lora_tokenizer_async(lora_request))
 
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     async def add_request_async(
         self,
         request_id: str,
@@ -894,7 +897,8 @@ async def run_engine_loop(engine_ref: ReferenceType):

     # This method does not need to be async, but kept that way
     # for backwards compatibility.
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     def add_request(
         self,
         request_id: str,
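For readers unfamiliar with the pattern this commit adopts: typing_extensions.deprecated (PEP 702) attached to an @overload lets type checkers flag call sites that match the legacy signature, while the runtime implementation keeps accepting the old keyword. A minimal, self-contained sketch with hypothetical names (Engine and this add_request are illustrative, not vLLM's real signatures):

# Minimal sketch of the @overload + @deprecated pattern; names are
# hypothetical, not vLLM's actual API.
from typing import Optional, overload

from typing_extensions import deprecated


class Engine:

    @overload
    @deprecated("'inputs' will be renamed to 'prompt'")
    def add_request(self, request_id: str, *, inputs: str) -> None:
        ...

    @overload
    def add_request(self, request_id: str, prompt: str) -> None:
        ...

    def add_request(self,
                    request_id: str,
                    prompt: Optional[str] = None,
                    *,
                    inputs: Optional[str] = None) -> None:
        # Runtime fallback: honor the legacy keyword until it is removed.
        if inputs is not None:
            prompt = inputs
        print(f"queued {request_id}: {prompt}")


engine = Engine()
engine.add_request("r1", prompt="Hello")  # preferred form
engine.add_request("r2", inputs="Hello")  # type checkers flag this overload

Both calls run normally; only static analysis reports the second one as deprecated, since overload stubs are never executed at runtime.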
5 changes: 3 additions & 2 deletions vllm/engine/llm_engine.py
@@ -10,7 +10,7 @@
 from typing import Set, Type, Union, cast, overload
 
 import torch
-from typing_extensions import TypeVar
+from typing_extensions import TypeVar, deprecated
 
 import vllm.envs as envs
 from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig,
@@ -719,7 +719,8 @@ def _add_processed_request(
     def stop_remote_worker_execution_loop(self) -> None:
         self.model_executor.stop_remote_worker_execution_loop()
 
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     def add_request(
         self,
         request_id: str,
5 changes: 4 additions & 1 deletion vllm/engine/multiprocessing/__init__.py
@@ -2,6 +2,8 @@
 from enum import Enum
 from typing import List, Mapping, Optional, Union, overload
 
+from typing_extensions import deprecated
+
 from vllm import PoolingParams
 from vllm.inputs import PromptType
 from vllm.lora.request import LoRARequest
@@ -32,7 +34,8 @@ class RPCProcessRequest:
     prompt_adapter_request: Optional[PromptAdapterRequest] = None
     priority: int = 0
 
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     def __init__(
         self,
         *,
7 changes: 5 additions & 2 deletions vllm/engine/multiprocessing/client.py
@@ -9,6 +9,7 @@
 import psutil
 import zmq
 import zmq.asyncio
+from typing_extensions import deprecated
 from zmq import Frame  # type: ignore[attr-defined]
 from zmq.asyncio import Socket
 
@@ -414,7 +415,8 @@ def errored(self) -> bool:
     def dead_error(self) -> BaseException:
         return ENGINE_DEAD_ERROR(self._errored_with)
 
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     def generate(
         self,
         *,
@@ -485,7 +487,8 @@ def generate(
                                   lora_request, trace_headers,
                                   prompt_adapter_request, priority)
 
-    @overload  # DEPRECATED
+    @overload
+    @deprecated("'inputs' will be renamed to 'prompt'")
     def encode(
         self,
         *,
31 changes: 0 additions & 31 deletions vllm/inputs/__init__.py
@@ -38,34 +38,3 @@
     "InputProcessingContext",
     "InputRegistry",
 ]
-
-
-def __getattr__(name: str):
-    import warnings
-
-    if name == "PromptInput":
-        msg = ("PromptInput has been renamed to PromptType. "
-               "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return PromptType
-
-    if name == "LLMInputs":
-        msg = ("LLMInputs has been renamed to DecoderOnlyInputs. "
-               "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return DecoderOnlyInputs
-
-    if name == "EncoderDecoderLLMInputs":
-        msg = (
-            "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. "
-            "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return EncoderDecoderInputs
-
-    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
31 changes: 0 additions & 31 deletions vllm/inputs/data.py
@@ -358,34 +358,3 @@ def to_enc_dec_tuple_list(
     return [(enc_dec_prompt["encoder_prompt"],
              enc_dec_prompt["decoder_prompt"])
             for enc_dec_prompt in enc_dec_prompts]
-
-
-def __getattr__(name: str):
-    import warnings
-
-    if name == "PromptInput":
-        msg = ("PromptInput has been renamed to PromptType. "
-               "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return PromptType
-
-    if name == "LLMInputs":
-        msg = ("LLMInputs has been renamed to DecoderOnlyInputs. "
-               "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return DecoderOnlyInputs
-
-    if name == "EncoderDecoderLLMInputs":
-        msg = (
-            "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. "
-            "The original name will be removed in an upcoming version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return EncoderDecoderInputs
-
-    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
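With these shims deleted, the old aliases stop resolving entirely instead of emitting a DeprecationWarning. A hedged illustration of the user-facing effect, assuming a vLLM build that includes this change:

# Effect of removing the module-level __getattr__ (assumes a vLLM build
# that includes this PR; on older builds the import merely warned).
from vllm.inputs import PromptType  # the supported name

try:
    from vllm.inputs import PromptInput  # removed alias
except ImportError:
    # With no module __getattr__, the lookup raises AttributeError, which
    # the import machinery surfaces as ImportError.
    print("PromptInput was removed; use PromptType instead")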
15 changes: 0 additions & 15 deletions vllm/multimodal/__init__.py
@@ -27,18 +27,3 @@
     "MULTIMODAL_REGISTRY",
     "MultiModalRegistry",
 ]
-
-
-def __getattr__(name: str):
-    import warnings
-
-    if name == "MultiModalInputs":
-        msg = ("MultiModalInputs has been renamed to MultiModalKwargs. "
-               "The original name will take another meaning in an upcoming "
-               "version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return MultiModalKwargs
-
-    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
15 changes: 0 additions & 15 deletions vllm/multimodal/base.py
@@ -433,18 +433,3 @@ def index_map(self) -> "IndexMap":
 
         return MultiModalPlaceholderMap.IndexMap(src=src_indices,
                                                  dest=dest_indices)
-
-
-def __getattr__(name: str):
-    import warnings
-
-    if name == "MultiModalInputs":
-        msg = ("MultiModalInputs has been renamed to MultiModalKwargs. "
-               "The original name will take another meaning in an upcoming "
-               "version.")
-
-        warnings.warn(DeprecationWarning(msg), stacklevel=2)
-
-        return MultiModalKwargs
-
-    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
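For reference, the pattern these files retire is PEP 562's module-level __getattr__. A generic sketch with illustrative names (OldName/NewName are placeholders, not vLLM symbols), should a future rename need the same temporary bridge:

# Generic PEP 562 deprecation shim; OldName/NewName are placeholders.
import warnings

NewName = object  # stand-in for the renamed symbol


def __getattr__(name: str):
    if name == "OldName":
        warnings.warn(
            "OldName has been renamed to NewName. "
            "The original name will be removed in an upcoming version.",
            DeprecationWarning,
            stacklevel=2)
        return NewName

    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")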