Skip to content

Commit a9af30b

Browse files
committed
fix(vllm): No module named 'ChatTTS.model'
1 parent e508fee commit a9af30b

10 files changed

+23
-22
lines changed

ChatTTS/model/velocity/block_manager.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from typing import Dict, List, Optional, Set, Tuple
55

66
from vllm.block import PhysicalTokenBlock
7-
from ChatTTS.model.velocity.sequence import Sequence, SequenceGroup, SequenceStatus
7+
from .sequence import Sequence, SequenceGroup, SequenceStatus
88
from vllm.utils import Device
99

1010
# Mapping: logical block number -> physical block.

ChatTTS/model/velocity/llm.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,10 @@
33
from tqdm import tqdm
44
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
55

6-
from ChatTTS.model.velocity.configs import EngineArgs
7-
from ChatTTS.model.velocity.llm_engine import LLMEngine
8-
from ChatTTS.model.velocity.output import RequestOutput
9-
from ChatTTS.model.velocity.sampling_params import SamplingParams
6+
from .configs import EngineArgs
7+
from .llm_engine import LLMEngine
8+
from .output import RequestOutput
9+
from .sampling_params import SamplingParams
1010
from vllm.utils import Counter
1111

1212

ChatTTS/model/velocity/llm_engine.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,14 @@
55
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union
66

77
from vllm.config import CacheConfig, ModelConfig, ParallelConfig, SchedulerConfig
8-
from ChatTTS.model.velocity.scheduler import Scheduler, SchedulerOutputs
9-
from ChatTTS.model.velocity.configs import EngineArgs
8+
from .scheduler import Scheduler, SchedulerOutputs
9+
from .configs import EngineArgs
1010
from vllm.engine.metrics import record_metrics
1111
from vllm.engine.ray_utils import RayWorkerVllm, initialize_cluster, ray
1212
from vllm.logger import init_logger
13-
from ChatTTS.model.velocity.output import RequestOutput
14-
from ChatTTS.model.velocity.sampling_params import SamplingParams
15-
from ChatTTS.model.velocity.sequence import (
13+
from .output import RequestOutput
14+
from .sampling_params import SamplingParams
15+
from .sequence import (
1616
SamplerOutput,
1717
Sequence,
1818
SequenceGroup,
@@ -127,7 +127,7 @@ def __init__(
127127
def _init_workers(self):
128128
# Lazy import the Worker to avoid importing torch.cuda/xformers
129129
# before CUDA_VISIBLE_DEVICES is set in the Worker
130-
from ChatTTS.model.velocity.worker import Worker
130+
from .worker import Worker
131131

132132
assert (
133133
self.parallel_config.world_size == 1

ChatTTS/model/velocity/model_loader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ def _set_default_torch_dtype(dtype: torch.dtype):
2424

2525
def _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]:
    """Resolve the model class to instantiate for *config*.

    Args:
        config: HuggingFace model config (currently unused; the loader is
            hard-wired to the local LlamaModel implementation).

    Returns:
        The ``LlamaModel`` class from the sibling ``llama`` module, or
        ``None`` if that module does not define ``LlamaModel``.
    """
    # BUG FIX: importlib.import_module with a relative name (".llama")
    # requires the anchor `package` argument; without it Python raises
    # "TypeError: the 'package' argument is required to perform a relative
    # import". Anchor on this module's own package so the relative import
    # matches the `from .x import y` style used elsewhere in this commit.
    model_cls = getattr(
        importlib.import_module(".llama", package=__package__),
        "LlamaModel",
        None,
    )
    return model_cls
3030

ChatTTS/model/velocity/model_runner.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,24 +5,24 @@
55
import torch
66
import torch.nn as nn
77

8-
from ChatTTS.model.velocity.configs import ModelConfig, ParallelConfig, SchedulerConfig
8+
from .configs import ModelConfig, ParallelConfig, SchedulerConfig
99
from vllm.logger import init_logger
10-
from ChatTTS.model.velocity.model_loader import get_model
10+
from .model_loader import get_model
1111
from vllm.model_executor import InputMetadata, SamplingMetadata
1212
from vllm.model_executor.parallel_utils.communication_op import (
1313
broadcast,
1414
broadcast_object_list,
1515
)
16-
from ChatTTS.model.velocity.sampling_params import SamplingParams, SamplingType
17-
from ChatTTS.model.velocity.sequence import (
16+
from .sampling_params import SamplingParams, SamplingType
17+
from .sequence import (
1818
SamplerOutput,
1919
SequenceData,
2020
SequenceGroupMetadata,
2121
SequenceGroupOutput,
2222
SequenceOutput,
2323
)
2424
from vllm.utils import in_wsl
25-
from ChatTTS.model.velocity.post_model import PostModel, Sampler
25+
from .post_model import PostModel, Sampler
2626
from safetensors.torch import safe_open
2727

2828
logger = init_logger(__name__)

ChatTTS/model/velocity/output.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import List, Optional
22
import torch
33

4-
from ChatTTS.model.velocity.sequence import (
4+
from .sequence import (
55
PromptLogprobs,
66
SampleLogprobs,
77
SequenceGroup,

ChatTTS/model/velocity/scheduler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,10 @@
33
from typing import Dict, Iterable, List, Optional, Tuple, Union
44

55
from vllm.config import CacheConfig, SchedulerConfig
6-
from ChatTTS.model.velocity.block_manager import AllocStatus, BlockSpaceManager
6+
from .block_manager import AllocStatus, BlockSpaceManager
77
from vllm.core.policy import PolicyFactory
88
from vllm.logger import init_logger
9-
from ChatTTS.model.velocity.sequence import (
9+
from .sequence import (
1010
Sequence,
1111
SequenceData,
1212
SequenceGroup,

ChatTTS/model/velocity/sequence.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
from typing import Dict, List, Optional, Union
66
import torch
77
from vllm.block import LogicalTokenBlock
8-
from ChatTTS.model.velocity.sampling_params import SamplingParams
8+
from .sampling_params import SamplingParams
99

1010
PromptLogprobs = List[Optional[Dict[int, float]]]
1111
SampleLogprobs = List[Dict[int, float]]

ChatTTS/model/velocity/worker.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
from vllm.model_executor.parallel_utils.parallel_state import initialize_model_parallel
1313
from vllm.sequence import SamplerOutput, SequenceGroupMetadata
1414
from vllm.worker.cache_engine import CacheEngine
15-
from ChatTTS.model.velocity.model_runner import ModelRunner
15+
from .model_runner import ModelRunner
1616

1717

1818
class Worker:

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,3 +15,4 @@ nemo_text_processing; sys_platform == 'linux'
1515
av
1616
pydub
1717
safetensors
18+
vllm>=0.2.7; sys_platform == 'linux'

0 commit comments

Comments
 (0)