From 1c2e05a07e0261b30535613d307c721d20d326d9 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Tue, 29 Jul 2025 15:06:27 +0800 Subject: [PATCH 01/17] add v1/models interface related --- fastdeploy/engine/args_utils.py | 10 ++ fastdeploy/entrypoints/openai/api_server.py | 28 ++++++ fastdeploy/entrypoints/openai/protocol.py | 32 +++++++ .../entrypoints/openai/serving_models.py | 95 +++++++++++++++++++ 4 files changed, 165 insertions(+) create mode 100644 fastdeploy/entrypoints/openai/serving_models.py diff --git a/fastdeploy/engine/args_utils.py b/fastdeploy/engine/args_utils.py index ee74a8a6b3..21cbe8484d 100644 --- a/fastdeploy/engine/args_utils.py +++ b/fastdeploy/engine/args_utils.py @@ -44,6 +44,10 @@ class EngineArgs: """ The name or path of the model to be used. """ + served_model_name: Optional[str] = None + """ + The name of the model being served. + """ revision: Optional[str] = "master" """ The revision for downloading models. @@ -332,6 +336,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: default=EngineArgs.model, help="Model name or path to be used.", ) + model_group.add_argument( + "--served-model-name", + type=nullable_str, + default=EngineArgs.served_model_name, + help="Served model name", + ) model_group.add_argument( "--revision", type=nullable_str, diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index 61fa5d3965..dd51f11a2e 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -36,9 +36,11 @@ CompletionResponse, ControlSchedulerRequest, ErrorResponse, + ModelList, ) from fastdeploy.entrypoints.openai.serving_chat import OpenAIServingChat from fastdeploy.entrypoints.openai.serving_completion import OpenAIServingCompletion +from fastdeploy.entrypoints.openai.serving_models import ModelPath, OpenAIServingModels from fastdeploy.metrics.metrics import ( EXCLUDE_LABELS, cleanup_prometheus_files, @@ -104,6 +106,13 @@ async def lifespan(app: FastAPI): else: pid = os.getpid() api_server_logger.info(f"{pid}") + + if args.served_model_name is not None: + served_model_names = args.served_model_name + else: + served_model_names = args.model + model_paths = [ModelPath(name=served_model_names, model_path=args.model)] + engine_client = EngineClient( args.tokenizer, args.max_model_len, @@ -118,11 +127,13 @@ async def lifespan(app: FastAPI): app.state.dynamic_load_weight = args.dynamic_load_weight chat_handler = OpenAIServingChat(engine_client, pid, args.ips) completion_handler = OpenAIServingCompletion(engine_client, pid, args.ips) + model_handler = OpenAIServingModels(engine_client, model_paths, args.max_model_len, pid, args.ips) engine_client.create_zmq_client(model=pid, mode=zmq.PUSH) engine_client.pid = pid app.state.engine_client = engine_client app.state.chat_handler = chat_handler app.state.completion_handler = completion_handler + app.state.model_handler = model_handler yield # close zmq try: @@ -232,6 +243,23 @@ async def create_completion(request: CompletionRequest): return StreamingResponse(content=generator, media_type="text/event-stream") +@app.get("v1/models") +def list_models() -> Response: + """ + List all available models. 
+ """ + if app.state.dynamic_load_weight: + status, msg = app.state.engine_client.is_workers_alive() + if not status: + return JSONResponse(content={"error": "Worker Service Not Healthy"}, status_code=304) + + models = app.state.model_handler.list_models() + if isinstance(models, ErrorResponse): + return JSONResponse(content=models.model_dump(), status_code=models.code) + elif isinstance(models, ModelList): + return JSONResponse(content=models.model_dump()) + + @app.get("/update_model_weight") def update_model_weight(request: Request) -> Response: """ diff --git a/fastdeploy/entrypoints/openai/protocol.py b/fastdeploy/entrypoints/openai/protocol.py index ca6232dfbb..a9e83e32d8 100644 --- a/fastdeploy/entrypoints/openai/protocol.py +++ b/fastdeploy/entrypoints/openai/protocol.py @@ -18,6 +18,7 @@ import json import time +import uuid from typing import Any, List, Literal, Optional, Union from pydantic import BaseModel, Field, model_validator @@ -55,6 +56,37 @@ class UsageInfo(BaseModel): prompt_tokens_details: Optional[PromptTokenUsageInfo] = None +class ModelPermission(BaseModel): + id: str = Field(default_factory=lambda: f"modelperm-{str(uuid.uuid4().hex)}") + object: str = "model_permission" + created: int = Field(default_factory=lambda: int(time.time())) + allow_create_engine: bool = False + allow_sampling: bool = True + allow_logprobs: bool = True + allow_search_indices: bool = False + allow_view: bool = True + allow_fine_tuning: bool = False + organization: str = "*" + group: Optional[str] = None + is_blocking: bool = False + + +class ModelInfo(BaseModel): + id: str + object: str = "model" + created: int = Field(default_factory=lambda: int(time.time())) + owned_by: str = "FastDeploy" + root: Optional[str] = None + parent: Optional[str] = None + max_model_len: Optional[int] = None + permission: list[ModelPermission] = Field(default_factory=list) + + +class ModelList(BaseModel): + object: str = "list" + data: list[ModelInfo] = Field(default_factory=list) + + class FunctionCall(BaseModel): """ Function call. diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py new file mode 100644 index 0000000000..a72043730f --- /dev/null +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -0,0 +1,95 @@ +""" +# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" + +from dataclasses import dataclass + +from fastdeploy.entrypoints.engine_client import EngineClient +from fastdeploy.entrypoints.openai.protocol import ( + ErrorResponse, + ModelInfo, + ModelList, + ModelPermission, +) +from fastdeploy.utils import api_server_logger, get_host_ip + + +@dataclass +class ModelPath: + name: str + model_path: str + + +class OpenAIServingModels: + """ + Shared instance to hold data about the loaded models + """ + + def __init__( + self, + engine_client: EngineClient, + model_paths: list[ModelPath], + max_model_len: int, + pid, + ips, + ): + self.engine_client = engine_client + self.model_paths = model_paths + self.max_model_len = max_model_len + self.pid = pid + self.master_ip = ips + self.host_ip = get_host_ip() + if self.master_ip is not None: + if isinstance(self.master_ip, list): + self.master_ip = self.master_ip[0] + else: + self.master_ip = self.master_ip.split(",")[0] + + def _check_master(self): + if self.master_ip is None: + return True + if self.host_ip == self.master_ip: + return True + return False + + def is_supported_model(self, model_name) -> bool: + """ + Check whether the specified model is supported. + """ + return any(model.name == model_name for model in self.model_paths) + + def model_name(self) -> str: + """ + Returns the current model name. + """ + return self.model_paths[0].name + + async def list_models(self) -> ModelList: + """ + Show available models. + """ + if not self._check_master(): + err_msg = ( + f"Only master node can accept models request, please send request to master node: {self.pod_ips[0]}" + ) + api_server_logger.error(err_msg) + return ErrorResponse(message=err_msg, code=400) + model_infos = [ + ModelInfo( + id=model.name, max_model_len=self.max_model_len, root=model.model_path, permission=[ModelPermission()] + ) + for model in self.model_paths + ] + return ModelList(data=model_infos) From f4215687d8801b0f1e04fc5439f003317c82b9c4 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Tue, 29 Jul 2025 17:13:45 +0800 Subject: [PATCH 02/17] add model parameters --- fastdeploy/entrypoints/openai/api_server.py | 30 ++++++++++++++----- fastdeploy/entrypoints/openai/serving_chat.py | 15 +++++++++- .../entrypoints/openai/serving_completion.py | 15 +++++++++- .../entrypoints/openai/serving_models.py | 4 +-- 4 files changed, 53 insertions(+), 11 deletions(-) diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index dd51f11a2e..02e9f88731 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -125,15 +125,31 @@ async def lifespan(app: FastAPI): args.data_parallel_size, ) app.state.dynamic_load_weight = args.dynamic_load_weight - chat_handler = OpenAIServingChat(engine_client, pid, args.ips) - completion_handler = OpenAIServingCompletion(engine_client, pid, args.ips) - model_handler = OpenAIServingModels(engine_client, model_paths, args.max_model_len, pid, args.ips) + model_handler = OpenAIServingModels( + engine_client, + model_paths, + args.max_model_len, + pid, + args.ips, + ) + app.state.model_handler = model_handler + chat_handler = OpenAIServingChat( + engine_client, + app.state.model_handler, + pid, + args.ips, + ) + completion_handler = OpenAIServingCompletion( + engine_client, + app.state.model_handler, + pid, + args.ips, + ) engine_client.create_zmq_client(model=pid, mode=zmq.PUSH) engine_client.pid = pid app.state.engine_client = engine_client app.state.chat_handler = chat_handler app.state.completion_handler = 
completion_handler - app.state.model_handler = model_handler yield # close zmq try: @@ -243,8 +259,8 @@ async def create_completion(request: CompletionRequest): return StreamingResponse(content=generator, media_type="text/event-stream") -@app.get("v1/models") -def list_models() -> Response: +@app.get("/v1/models") +async def list_models() -> Response: """ List all available models. """ @@ -253,7 +269,7 @@ def list_models() -> Response: if not status: return JSONResponse(content={"error": "Worker Service Not Healthy"}, status_code=304) - models = app.state.model_handler.list_models() + models = await app.state.model_handler.list_models() if isinstance(models, ErrorResponse): return JSONResponse(content=models.model_dump(), status_code=models.code) elif isinstance(models, ModelList): diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 86da7eaea9..423ea72084 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -25,6 +25,7 @@ import numpy as np from aiozmq import zmq +from fastdeploy.entrypoints.engine_client import EngineClient from fastdeploy.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionResponse, @@ -39,6 +40,7 @@ PromptTokenUsageInfo, UsageInfo, ) +from fastdeploy.entrypoints.openai.serving_models import OpenAIServingModels from fastdeploy.metrics.work_metrics import work_process_metrics from fastdeploy.utils import api_server_logger, get_host_ip from fastdeploy.worker.output import LogprobsLists @@ -49,8 +51,15 @@ class OpenAIServingChat: OpenAI-style chat completions serving """ - def __init__(self, engine_client, pid, ips): + def __init__( + self, + engine_client: EngineClient, + models: OpenAIServingModels, + pid: int, + ips, + ): self.engine_client = engine_client + self.models = models self.pid = pid self.master_ip = ips self.host_ip = get_host_ip() @@ -76,6 +85,10 @@ async def create_chat_completion(self, request: ChatCompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) + if not self.models.is_supported_model(request.model): + err_msg = f"Unsupported model: {request.model}" + api_server_logger.error(err_msg) + raise ErrorResponse(message=err_msg, code=400) if request.user is not None: request_id = f"chatcmpl-{request.user}-{uuid.uuid4()}" else: diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index a7a058858c..5eb0e929bc 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -25,6 +25,7 @@ from aiozmq import zmq from fastdeploy.engine.request import RequestOutput +from fastdeploy.entrypoints.engine_client import EngineClient from fastdeploy.entrypoints.openai.protocol import ( CompletionRequest, CompletionResponse, @@ -34,12 +35,20 @@ ErrorResponse, UsageInfo, ) +from fastdeploy.entrypoints.openai.serving_models import OpenAIServingModels from fastdeploy.utils import api_server_logger, get_host_ip class OpenAIServingCompletion: - def __init__(self, engine_client, pid, ips): + def __init__( + self, + engine_client: EngineClient, + models: OpenAIServingModels, + pid: int, + ips, + ): self.engine_client = engine_client + self.models = models self.pid = pid self.master_ip = ips self.host_ip = get_host_ip() @@ -64,6 +73,10 @@ async def 
create_completion(self, request: CompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) + if not self.models.is_supported_model(request.model): + err_msg = f"Unsupported model: {request.model}" + api_server_logger.error(err_msg) + raise ErrorResponse(message=err_msg, code=400) created_time = int(time.time()) if request.user is not None: request_id = f"cmpl-{request.user}-{uuid.uuid4()}" diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index a72043730f..76995b2804 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -34,7 +34,7 @@ class ModelPath: class OpenAIServingModels: """ - Shared instance to hold data about the loaded models + OpenAI-style models serving """ def __init__( @@ -42,7 +42,7 @@ def __init__( engine_client: EngineClient, model_paths: list[ModelPath], max_model_len: int, - pid, + pid: int, ips, ): self.engine_client = engine_client From 5446d4a852b34229bd2de405cd611fbf4b172cdc Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Tue, 29 Jul 2025 19:25:12 +0800 Subject: [PATCH 03/17] default model verification --- fastdeploy/entrypoints/openai/serving_models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index 76995b2804..2cbe6b3d3b 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -68,6 +68,8 @@ def is_supported_model(self, model_name) -> bool: """ Check whether the specified model is supported. 
""" + if model_name == "default": + return True return any(model.name == model_name for model in self.model_paths) def model_name(self) -> str: From a57341c6c5900dfcc75f8d2431927634a677f267 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Tue, 29 Jul 2025 21:19:58 +0800 Subject: [PATCH 04/17] unit test --- fastdeploy/entrypoints/openai/serving_chat.py | 2 +- .../entrypoints/openai/serving_completion.py | 2 +- .../entrypoints/openai/test_serving_models.py | 45 +++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 test/entrypoints/openai/test_serving_models.py diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 423ea72084..86b5802b2b 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -88,7 +88,7 @@ async def create_chat_completion(self, request: ChatCompletionRequest): if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}" api_server_logger.error(err_msg) - raise ErrorResponse(message=err_msg, code=400) + return ErrorResponse(message=err_msg, code=400) if request.user is not None: request_id = f"chatcmpl-{request.user}-{uuid.uuid4()}" else: diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index 5eb0e929bc..538f2e93e5 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -76,7 +76,7 @@ async def create_completion(self, request: CompletionRequest): if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}" api_server_logger.error(err_msg) - raise ErrorResponse(message=err_msg, code=400) + return ErrorResponse(message=err_msg, code=400) created_time = int(time.time()) if request.user is not None: request_id = f"cmpl-{request.user}-{uuid.uuid4()}" diff --git a/test/entrypoints/openai/test_serving_models.py b/test/entrypoints/openai/test_serving_models.py new file mode 100644 index 0000000000..675d09ed36 --- /dev/null +++ b/test/entrypoints/openai/test_serving_models.py @@ -0,0 +1,45 @@ +from unittest.mock import MagicMock + +import pytest + +from fastdeploy.entrypoints.engine_client import EngineClient +from fastdeploy.entrypoints.openai.protocol import ModelInfo, ModelList +from fastdeploy.entrypoints.openai.serving_models import ModelPath, OpenAIServingModels +from fastdeploy.utils import get_host_ip + +MODEL_NAME = "baidu/ERNIE-4.5-0.3B-PT" +MODEL_PATHS = [ModelPath(name=MODEL_NAME, model_path=MODEL_NAME)] +MAX_MODEL_LEN = 2048 + + +async def _async_serving_models_init() -> OpenAIServingModels: + mock_engine_client = MagicMock(spec=EngineClient) + + serving_models = OpenAIServingModels( + engine_client=mock_engine_client, + model_paths=MODEL_PATHS, + max_model_len=MAX_MODEL_LEN, + pid=1, + ips=[get_host_ip()], + ) + + return serving_models + + +@pytest.mark.asyncio +async def test_serving_model_name(): + serving_models = await _async_serving_models_init() + assert serving_models.model_name(None) == MODEL_NAME + + +@pytest.mark.asyncio +async def test_list_models(serving_models): + serving_models = await _async_serving_models_init() + result = serving_models.list_models() + assert isinstance(result, ModelList) + assert isinstance(result.data[0], ModelInfo) + assert result.object == "list" + assert len(result.data) == 1 + assert result.data[0].id == MODEL_NAME + assert result.data[0].max_model_len == 
MAX_MODEL_LEN + assert result.data[0].root == MODEL_PATHS[0].model_path From 81ce7894d3740229d0d256ef897ad3864eedaa42 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Tue, 29 Jul 2025 21:30:55 +0800 Subject: [PATCH 05/17] check model err_msg --- fastdeploy/entrypoints/openai/serving_chat.py | 2 +- fastdeploy/entrypoints/openai/serving_completion.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 86b5802b2b..62fa8ff4fd 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -86,7 +86,7 @@ async def create_chat_completion(self, request: ChatCompletionRequest): api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) if not self.models.is_supported_model(request.model): - err_msg = f"Unsupported model: {request.model}" + err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) if request.user is not None: diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index 538f2e93e5..1f3ee42536 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -74,7 +74,7 @@ async def create_completion(self, request: CompletionRequest): api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) if not self.models.is_supported_model(request.model): - err_msg = f"Unsupported model: {request.model}" + err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) created_time = int(time.time()) From 2cba65dc363772a960611d77b4e5ca47105ef56f Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Wed, 30 Jul 2025 10:39:46 +0800 Subject: [PATCH 06/17] unit test --- fastdeploy/entrypoints/openai/serving_models.py | 2 -- test/entrypoints/openai/test_serving_models.py | 9 ++++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index 2cbe6b3d3b..98ebe1095d 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -42,13 +42,11 @@ def __init__( engine_client: EngineClient, model_paths: list[ModelPath], max_model_len: int, - pid: int, ips, ): self.engine_client = engine_client self.model_paths = model_paths self.max_model_len = max_model_len - self.pid = pid self.master_ip = ips self.host_ip = get_host_ip() if self.master_ip is not None: diff --git a/test/entrypoints/openai/test_serving_models.py b/test/entrypoints/openai/test_serving_models.py index 675d09ed36..e85b78c8ab 100644 --- a/test/entrypoints/openai/test_serving_models.py +++ b/test/entrypoints/openai/test_serving_models.py @@ -19,8 +19,7 @@ async def _async_serving_models_init() -> OpenAIServingModels: engine_client=mock_engine_client, model_paths=MODEL_PATHS, max_model_len=MAX_MODEL_LEN, - pid=1, - ips=[get_host_ip()], + ips=get_host_ip(), ) return serving_models @@ -29,13 +28,13 @@ async def _async_serving_models_init() -> OpenAIServingModels: @pytest.mark.asyncio async def test_serving_model_name(): serving_models = await _async_serving_models_init() - 
assert serving_models.model_name(None) == MODEL_NAME + assert serving_models.model_name() == MODEL_NAME @pytest.mark.asyncio -async def test_list_models(serving_models): +async def test_list_models(): serving_models = await _async_serving_models_init() - result = serving_models.list_models() + result = await serving_models.list_models() assert isinstance(result, ModelList) assert isinstance(result.data[0], ModelInfo) assert result.object == "list" From 0cf44dadc0d68a040551e629fadba605f5b8cb0f Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Wed, 30 Jul 2025 11:38:01 +0800 Subject: [PATCH 07/17] type annotation --- fastdeploy/entrypoints/openai/api_server.py | 1 - fastdeploy/entrypoints/openai/serving_chat.py | 4 ++-- fastdeploy/entrypoints/openai/serving_completion.py | 4 ++-- fastdeploy/entrypoints/openai/serving_models.py | 3 ++- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index 02e9f88731..b91b51dd48 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -129,7 +129,6 @@ async def lifespan(app: FastAPI): engine_client, model_paths, args.max_model_len, - pid, args.ips, ) app.state.model_handler = model_handler diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 62fa8ff4fd..a32ed02f21 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -18,7 +18,7 @@ import time import traceback import uuid -from typing import List, Optional +from typing import List, Optional, Union import aiozmq import msgpack @@ -56,7 +56,7 @@ def __init__( engine_client: EngineClient, models: OpenAIServingModels, pid: int, - ips, + ips: Union[List[str], str], ): self.engine_client = engine_client self.models = models diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index 1f3ee42536..0c69840e5f 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -17,7 +17,7 @@ import asyncio import time import uuid -from typing import List +from typing import List, Union import aiozmq import msgpack @@ -45,7 +45,7 @@ def __init__( engine_client: EngineClient, models: OpenAIServingModels, pid: int, - ips, + ips: Union[List[str], str], ): self.engine_client = engine_client self.models = models diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index 98ebe1095d..c1f16b2b2f 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -15,6 +15,7 @@ """ from dataclasses import dataclass +from typing import List, Union from fastdeploy.entrypoints.engine_client import EngineClient from fastdeploy.entrypoints.openai.protocol import ( @@ -42,7 +43,7 @@ def __init__( engine_client: EngineClient, model_paths: list[ModelPath], max_model_len: int, - ips, + ips: Union[List[str], str], ): self.engine_client = engine_client self.model_paths = model_paths From a2145652217f9874cd15eebad5fd022c0295e722 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Wed, 30 Jul 2025 11:57:13 +0800 Subject: [PATCH 08/17] model parameter in response --- fastdeploy/entrypoints/openai/serving_chat.py | 2 ++ fastdeploy/entrypoints/openai/serving_completion.py | 2 ++ 2 files changed, 4 insertions(+) diff --git 
a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index a32ed02f21..80aa13d3a6 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -85,6 +85,8 @@ async def create_chat_completion(self, request: ChatCompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) + if request.model == "default": + request.model = self.models.model_name() if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index 0c69840e5f..08b8606d3b 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -73,6 +73,8 @@ async def create_completion(self, request: CompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) + if request.model == "default": + request.model = self.models.model_name() if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) From bba5c0e62f71e7a05db83e46d6f2725e0b85b5b4 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Wed, 30 Jul 2025 14:43:58 +0800 Subject: [PATCH 09/17] modify document description --- docs/parameters.md | 5 +++-- docs/zh/parameters.md | 4 +++- fastdeploy/entrypoints/openai/api_server.py | 1 - fastdeploy/entrypoints/openai/serving_models.py | 3 --- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/parameters.md b/docs/parameters.md index c52fc9ac6f..1ea98d5e53 100644 --- a/docs/parameters.md +++ b/docs/parameters.md @@ -34,7 +34,7 @@ When using FastDeploy to deploy models (including offline inference and service | ```static_decode_blocks``` | `int` | During inference, each request is forced to allocate corresponding number of blocks from Prefill's KVCache for Decode use, default: 2 | | ```reasoning_parser``` | `str` | Specify the reasoning parser to extract reasoning content from model output | | ```use_cudagraph``` | `bool` | Whether to use cuda graph, default: False | -|```graph_optimization_config``` | `str` | Parameters related to graph optimization can be configured, with default values of'{"use_cudagraph":false, "graph_opt_level":0, "cudagraph_capture_sizes": null }' | +|```graph_optimization_config``` | `str` | Parameters related to graph optimization can be configured, with default values of'{"use_cudagraph":false, "graph_opt_level":0, "cudagraph_capture_sizes": null }' | | ```enable_custom_all_reduce``` | `bool` | Enable Custom all-reduce, default: False | | ```splitwise_role``` | `str` | Whether to enable splitwise inference, default value: mixed, supported parameters: ["mixed", "decode", "prefill"] | | ```innode_prefill_ports``` | `str` | Internal engine startup ports for prefill instances (only required for single-machine PD separation), default: None | @@ -44,7 +44,8 @@ When using FastDeploy to deploy models (including offline inference and service | 
```dynamic_load_weight``` | `int` | Whether to enable dynamic weight loading, default: 0 | | ```enable_expert_parallel``` | `bool` | Whether to enable expert parallel | | ```enable_logprob``` | `bool` | Whether to enable return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.If logrpob is not used, this parameter can be omitted when starting | - +| ```served_model_name```| `str`| The model name used in the API. If not specified, the model name will be the same as the --model argument | +| ```revision``` | `str` | The specific model version to use. It can be a branch name, a tag name, or a commit id. | ## 1. Relationship between KVCache allocation, ```num_gpu_blocks_override``` and ```block_size```? During FastDeploy inference, GPU memory is occupied by ```model weights```, ```preallocated KVCache blocks``` and ```model computation intermediate activation values```. The preallocated KVCache blocks are determined by ```num_gpu_blocks_override```, with ```block_size``` (default: 64) as its unit, meaning one block can store KVCache for 64 Tokens. diff --git a/docs/zh/parameters.md b/docs/zh/parameters.md index fbf57a971c..1f754deb2c 100644 --- a/docs/zh/parameters.md +++ b/docs/zh/parameters.md @@ -32,7 +32,7 @@ | ```static_decode_blocks``` | `int` | 推理过程中,每条请求强制从Prefill的KVCache分配对应块数给Decode使用,默认2| | ```reasoning_parser``` | `str` | 指定要使用的推理解析器,以便从模型输出中提取推理内容 | | ```use_cudagraph``` | `bool` | 是否使用cuda graph,默认False | -|```graph_optimization_config``` | `str` | 可以配置计算图优化相关的参数,默认值为'{"use_cudagraph":false, "graph_opt_level":0, "cudagraph_capture_sizes": null }' | +|```graph_optimization_config``` | `str` | 可以配置计算图优化相关的参数,默认值为'{"use_cudagraph":false, "graph_opt_level":0, "cudagraph_capture_sizes": null }' | | ```enable_custom_all_reduce``` | `bool` | 开启Custom all-reduce,默认False | | ```splitwise_role``` | `str` | 是否开启splitwise推理,默认值mixed, 支持参数为["mixed", "decode", "prefill"] | | ```innode_prefill_ports``` | `str` | prefill 实例内部引擎启动端口 (仅单机PD分离需要),默认值None | @@ -42,6 +42,8 @@ | ```dynamic_load_weight``` | `int` | 是否动态加载权重,默认0 | | ```enable_expert_parallel``` | `bool` | 是否启用专家并行 | | ```enable_logprob``` | `bool` | 是否启用输出token返回logprob。如果未使用 logrpob,则在启动时可以省略此参数。 | +| ```served_model_name``` | `str` | API 中使用的模型名称,如果未指定,模型名称将与--model参数相同 | +| ```revision``` | `str` | 自动下载模型时,用于指定模型的Git版本,分支名或tag | ## 1. KVCache分配与```num_gpu_blocks_override```、```block_size```的关系? 
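For reference, the `/v1/models` endpoint and the `served_model_name` parameter documented above can be exercised from the client side with a few lines of Python. This is a minimal sketch, not part of the patch: the host and port are placeholders, it assumes an api_server built from this branch is already running, and it uses the `requests` package rather than anything shipped with FastDeploy.

```python
import requests

# Placeholder address; point this at your running FastDeploy api_server.
BASE_URL = "http://localhost:8188"

resp = requests.get(f"{BASE_URL}/v1/models", timeout=10)
resp.raise_for_status()
payload = resp.json()

# The body follows the ModelList / ModelInfo schema added in protocol.py:
# {"object": "list", "data": [{"id": ..., "root": ..., "max_model_len": ..., "permission": [...]}]}
for model in payload.get("data", []):
    print(model["id"], model.get("root"), model.get("max_model_len"))
```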
diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index b91b51dd48..ca831073fc 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -126,7 +126,6 @@ async def lifespan(app: FastAPI): ) app.state.dynamic_load_weight = args.dynamic_load_weight model_handler = OpenAIServingModels( - engine_client, model_paths, args.max_model_len, args.ips, diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index c1f16b2b2f..cb2d527f8c 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -17,7 +17,6 @@ from dataclasses import dataclass from typing import List, Union -from fastdeploy.entrypoints.engine_client import EngineClient from fastdeploy.entrypoints.openai.protocol import ( ErrorResponse, ModelInfo, @@ -40,12 +39,10 @@ class OpenAIServingModels: def __init__( self, - engine_client: EngineClient, model_paths: list[ModelPath], max_model_len: int, ips: Union[List[str], str], ): - self.engine_client = engine_client self.model_paths = model_paths self.max_model_len = max_model_len self.master_ip = ips From ca08ee1ca63624c907fbab53ef42016129129654 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Wed, 30 Jul 2025 14:46:18 +0800 Subject: [PATCH 10/17] modify document description --- docs/parameters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/parameters.md b/docs/parameters.md index 1ea98d5e53..48e9c73bbe 100644 --- a/docs/parameters.md +++ b/docs/parameters.md @@ -45,7 +45,7 @@ When using FastDeploy to deploy models (including offline inference and service | ```enable_expert_parallel``` | `bool` | Whether to enable expert parallel | | ```enable_logprob``` | `bool` | Whether to enable return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.If logrpob is not used, this parameter can be omitted when starting | | ```served_model_name```| `str`| The model name used in the API. If not specified, the model name will be the same as the --model argument | -| ```revision``` | `str` | The specific model version to use. It can be a branch name, a tag name, or a commit id. | +| ```revision``` | `str` | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | ## 1. Relationship between KVCache allocation, ```num_gpu_blocks_override``` and ```block_size```? During FastDeploy inference, GPU memory is occupied by ```model weights```, ```preallocated KVCache blocks``` and ```model computation intermediate activation values```. The preallocated KVCache blocks are determined by ```num_gpu_blocks_override```, with ```block_size``` (default: 64) as its unit, meaning one block can store KVCache for 64 Tokens. 
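A client-side illustration of the model-name check introduced in this series may also help. The sketch below is illustrative only: it assumes the server was started with `--served-model-name` (so verification is enabled), the address is a placeholder, and the exact HTTP status of a rejection depends on how the api_server wraps the `ErrorResponse` (the response body carries `code=400` and an "Unsupported model: ..." message).

```python
import requests

BASE_URL = "http://localhost:8188"  # placeholder address, adjust to your deployment

def chat(model_name: str) -> requests.Response:
    body = {
        "model": model_name,
        "messages": [{"role": "user", "content": "hello"}],
        "stream": False,
    }
    return requests.post(f"{BASE_URL}/v1/chat/completions", json=body, timeout=30)

# "default" is always accepted; the handler rewrites it to the served model name.
ok = chat("default")
print(ok.status_code)

# Any other name must match a ModelPath entry; otherwise the handler returns an
# ErrorResponse ("Unsupported model: ..., support ... or default").
bad = chat("not-a-served-model")
print(bad.status_code, bad.json())
```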
From 08ef40f0384221f653b60575b97a7e25ecd22abe Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Thu, 7 Aug 2025 13:31:57 +0800 Subject: [PATCH 11/17] unit test --- .../entrypoints/openai/test_serving_models.py | 55 +++++++++++-------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/test/entrypoints/openai/test_serving_models.py b/test/entrypoints/openai/test_serving_models.py index e85b78c8ab..cb8100f00e 100644 --- a/test/entrypoints/openai/test_serving_models.py +++ b/test/entrypoints/openai/test_serving_models.py @@ -1,8 +1,6 @@ -from unittest.mock import MagicMock +import asyncio +import unittest -import pytest - -from fastdeploy.entrypoints.engine_client import EngineClient from fastdeploy.entrypoints.openai.protocol import ModelInfo, ModelList from fastdeploy.entrypoints.openai.serving_models import ModelPath, OpenAIServingModels from fastdeploy.utils import get_host_ip @@ -13,32 +11,41 @@ async def _async_serving_models_init() -> OpenAIServingModels: - mock_engine_client = MagicMock(spec=EngineClient) - - serving_models = OpenAIServingModels( - engine_client=mock_engine_client, + """异步初始化 OpenAIServingModels 实例""" + return OpenAIServingModels( model_paths=MODEL_PATHS, max_model_len=MAX_MODEL_LEN, ips=get_host_ip(), ) - return serving_models +class TestOpenAIServingModels(unittest.TestCase): + """测试 OpenAIServingModels 的 unittest 版本""" + + def test_serving_model_name(self): + """测试模型名称获取""" + # 通过 asyncio.run() 执行异步初始化 + serving_models = asyncio.run(_async_serving_models_init()) + self.assertEqual(serving_models.model_name(), MODEL_NAME) + + def test_list_models(self): + """测试模型列表功能""" + serving_models = asyncio.run(_async_serving_models_init()) + + # 通过 asyncio.run() 执行异步方法 + result = asyncio.run(serving_models.list_models()) + + # 验证返回类型和内容 + self.assertIsInstance(result, ModelList) + self.assertEqual(len(result.data), 1) -@pytest.mark.asyncio -async def test_serving_model_name(): - serving_models = await _async_serving_models_init() - assert serving_models.model_name() == MODEL_NAME + model_info = result.data[0] + self.assertIsInstance(model_info, ModelInfo) + self.assertEqual(model_info.id, MODEL_NAME) + self.assertEqual(model_info.max_model_len, MAX_MODEL_LEN) + self.assertEqual(model_info.root, MODEL_PATHS[0].model_path) + self.assertEqual(result.object, "list") -@pytest.mark.asyncio -async def test_list_models(): - serving_models = await _async_serving_models_init() - result = await serving_models.list_models() - assert isinstance(result, ModelList) - assert isinstance(result.data[0], ModelInfo) - assert result.object == "list" - assert len(result.data) == 1 - assert result.data[0].id == MODEL_NAME - assert result.data[0].max_model_len == MAX_MODEL_LEN - assert result.data[0].root == MODEL_PATHS[0].model_path +if __name__ == "__main__": + unittest.main() From 5ad7d0f293e3b03ec2ab670173806943050fdaa1 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Mon, 11 Aug 2025 13:32:19 +0800 Subject: [PATCH 12/17] verification --- fastdeploy/entrypoints/openai/api_server.py | 4 +++- fastdeploy/entrypoints/openai/serving_models.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index 2c619432b9..b012554730 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -126,9 +126,11 @@ async def lifespan(app: FastAPI): if args.served_model_name is not None: served_model_names = args.served_model_name + verification = 
True else: served_model_names = args.model - model_paths = [ModelPath(name=served_model_names, model_path=args.model)] + verification = False + model_paths = [ModelPath(name=served_model_names, model_path=args.model, verification=verification)] engine_client = EngineClient( args.model, diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index cb2d527f8c..90822294cc 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -30,6 +30,7 @@ class ModelPath: name: str model_path: str + verification: bool = False class OpenAIServingModels: @@ -64,6 +65,8 @@ def is_supported_model(self, model_name) -> bool: """ Check whether the specified model is supported. """ + if self.model_paths[0].verification is False: + return True if model_name == "default": return True return any(model.name == model_name for model in self.model_paths) From ce47277d839f26e341a53e5586303c9ce3d0d149 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Mon, 11 Aug 2025 15:10:24 +0800 Subject: [PATCH 13/17] verification update --- fastdeploy/entrypoints/openai/serving_chat.py | 4 ++-- fastdeploy/entrypoints/openai/serving_completion.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 7d10ddf4bc..258f63791b 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -49,7 +49,7 @@ class OpenAIServingChat: OpenAI-style chat completions serving """ - def __init__(self, models, engine_client, pid, ips, max_waiting_time): + def __init__(self, engine_client, models, pid, ips, max_waiting_time): self.engine_client = engine_client self.models = models self.pid = pid @@ -78,7 +78,7 @@ async def create_chat_completion(self, request: ChatCompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - if request.model == "default": + if request.model == "default" or self.models.model_paths[0].verification is False: request.model = self.models.model_name() if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index 336248e68e..b90cdcb457 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -40,7 +40,7 @@ class OpenAIServingCompletion: - def __init__(self, models, engine_client, pid, ips, max_waiting_time): + def __init__(self, engine_client, models, pid, ips, max_waiting_time): self.engine_client = engine_client self.models = models self.pid = pid @@ -68,7 +68,7 @@ async def create_completion(self, request: CompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - if request.model == "default": + if request.model == "default" or self.models.model_paths[0].verification is False: request.model = self.models.model_name() if not self.models.is_supported_model(request.model): err_msg = f"Unsupported model: {request.model}, support {', 
'.join([x.name for x in self.models.model_paths])} or default" From e2940c080d6b34f2ececbaf7700bf3fafdf9fb35 Mon Sep 17 00:00:00 2001 From: yangzichao01 Date: Mon, 11 Aug 2025 15:31:21 +0800 Subject: [PATCH 14/17] model_name --- fastdeploy/entrypoints/openai/serving_chat.py | 5 ++--- fastdeploy/entrypoints/openai/serving_completion.py | 5 ++--- fastdeploy/entrypoints/openai/serving_models.py | 8 ++++---- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 258f63791b..2cfdaa0f80 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -78,9 +78,8 @@ async def create_chat_completion(self, request: ChatCompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - if request.model == "default" or self.models.model_paths[0].verification is False: - request.model = self.models.model_name() - if not self.models.is_supported_model(request.model): + is_supported, request.model = self.models.is_supported_model(request.model) + if not is_supported: err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index b90cdcb457..3782bd9fe8 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -68,9 +68,8 @@ async def create_completion(self, request: CompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - if request.model == "default" or self.models.model_paths[0].verification is False: - request.model = self.models.model_name() - if not self.models.is_supported_model(request.model): + is_supported, request.model = self.models.is_supported_model(request.model) + if not is_supported: err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) diff --git a/fastdeploy/entrypoints/openai/serving_models.py b/fastdeploy/entrypoints/openai/serving_models.py index 90822294cc..4b1f5cb099 100644 --- a/fastdeploy/entrypoints/openai/serving_models.py +++ b/fastdeploy/entrypoints/openai/serving_models.py @@ -61,15 +61,15 @@ def _check_master(self): return True return False - def is_supported_model(self, model_name) -> bool: + def is_supported_model(self, model_name) -> tuple[bool, str]: """ Check whether the specified model is supported. 
""" if self.model_paths[0].verification is False: - return True + return True, self.model_name() if model_name == "default": - return True - return any(model.name == model_name for model in self.model_paths) + return True, self.model_name() + return any(model.name == model_name for model in self.model_paths), model_name def model_name(self) -> str: """ From ca68e24cb7691916027ee738338e1546db28f8a8 Mon Sep 17 00:00:00 2001 From: LiqinruiG <37392159+LiqinruiG@users.noreply.github.com> Date: Tue, 19 Aug 2025 08:14:15 +0000 Subject: [PATCH 15/17] pre-commit --- fastdeploy/entrypoints/openai/api_server.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fastdeploy/entrypoints/openai/api_server.py b/fastdeploy/entrypoints/openai/api_server.py index 4194fcb145..90e58f9072 100644 --- a/fastdeploy/entrypoints/openai/api_server.py +++ b/fastdeploy/entrypoints/openai/api_server.py @@ -161,12 +161,7 @@ async def lifespan(app: FastAPI): ) app.state.model_handler = model_handler chat_handler = OpenAIServingChat( - engine_client, - app.state.model_handler, - pid, - args.ips, - args.max_waiting_time, - chat_template + engine_client, app.state.model_handler, pid, args.ips, args.max_waiting_time, chat_template ) completion_handler = OpenAIServingCompletion( engine_client, From 6472ef7ded96b121d6616d294c4249ef8124874e Mon Sep 17 00:00:00 2001 From: LiqinruiG <37392159+LiqinruiG@users.noreply.github.com> Date: Tue, 19 Aug 2025 14:13:57 +0000 Subject: [PATCH 16/17] update test case --- test/ce/server/test_evil_cases.py | 127 +++++++++--------------------- 1 file changed, 39 insertions(+), 88 deletions(-) diff --git a/test/ce/server/test_evil_cases.py b/test/ce/server/test_evil_cases.py index 508bfeabd4..aba46cd09d 100644 --- a/test/ce/server/test_evil_cases.py +++ b/test/ce/server/test_evil_cases.py @@ -130,15 +130,14 @@ def test_multilingual_input(): "messages": [ { "role": "user", - "content": "这是一个包含多种语言的输入:Hello, 世界!Bonjour, le monde! Hola, el mundo! こんにちは、世界!" + "content": "这是一个包含多种语言的输入:Hello, 世界!Bonjour, le monde! Hola, el mundo! 
こんにちは、世界!", } ], "stream": False, - } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() - + # 验证响应是否包含有效的回复 assert "choices" in resp, "未收到有效的回复" assert len(resp["choices"]) > 0, "回复为空" @@ -150,18 +149,9 @@ def test_multilingual_input(): print("多语言混合输入测试通过!") - def test_too_long_input(): """测试超长输入是否被正确处理""" - data = { - "messages": [ - { - "role": "user", - "content": "a," * 200000 # 超过最大输入长度 - } - ], - "stream": False - } + data = {"messages": [{"role": "user", "content": "a," * 200000}], "stream": False} # 超过最大输入长度 payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert resp["detail"].get("object") == "error", "超长输入未被识别为错误" @@ -170,15 +160,7 @@ def test_too_long_input(): def test_empty_input(): """测试空输入是否被正确处理""" - data = { - "messages": [ - { - "role": "user", - "content": "" # 空输入 - } - ], - "stream": False - } + data = {"messages": [{"role": "user", "content": ""}], "stream": False} # 空输入 payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert "error" not in resp.get("object"), "空输入被识别为错误" @@ -191,7 +173,7 @@ def test_prompt_only_spaces(): "messages": [ { "role": "user", - "content": " ", # 纯空格 + "content": " ", # 纯空格 } ], "stream": False, @@ -202,19 +184,11 @@ def test_prompt_only_spaces(): assert resp.get("object") == "chat.completion", "应返回 chat.completion 对象" response_content = resp["choices"][0]["message"]["content"] assert len(response_content) > 0, "messages content为空,未正常生成回复" - + def test_illegal_characters(): """测试非法字符输入是否被正确处理""" - data = { - "messages": [ - { - "role": "user", - "content": "非洲的首都是?:\x00\x01\x02" # 非法字符 - } - ], - "stream": False - } + data = {"messages": [{"role": "user", "content": "非洲的首都是?:\x00\x01\x02"}], "stream": False} # 非法字符 payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert len(resp["choices"][0]["message"]["content"]) > 0, "非法字符输入影响模型回复" @@ -224,10 +198,7 @@ def test_sql_injection(): """测试 SQL 注入攻击是否被正确处理""" data = { "messages": [ - { - "role": "user", - "content": "SELECT * FROM users WHERE username = 'admin' OR '1'='1';" # SQL 注入攻击 - } + {"role": "user", "content": "SELECT * FROM users WHERE username = 'admin' OR '1'='1';"} # SQL 注入攻击 ], "stream": False, "max_tokens": 50, @@ -243,12 +214,7 @@ def test_sql_injection(): def test_xss_attack(): """测试 XSS 攻击是否被正确处理""" data = { - "messages": [ - { - "role": "user", - "content": "" # XSS 攻击 - } - ], + "messages": [{"role": "user", "content": ""}], # XSS 攻击 "stream": False, "max_tokens": 50, } @@ -264,14 +230,9 @@ def test_stop_empty_string(): """测试 stop 参数为空字符串时的行为""" data = { "stream": False, - "messages": [ - { - "role": "user", - "content": "非洲的首都是?" - } - ], + "messages": [{"role": "user", "content": "非洲的首都是?"}], "max_tokens": 10, - "stop": "" # 空字符串 + "stop": "", # 空字符串 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() @@ -283,14 +244,9 @@ def test_stop_multiple_strings(): """测试 stop 参数为多个字符串时的行为""" data = { "stream": False, - "messages": [ - { - "role": "user", - "content": "非洲的首都是?" 
- } - ], + "messages": [{"role": "user", "content": "非洲的首都是?"}], "max_tokens": 50, - "stop": ["。", "!", "?"] # 多个停止条件 + "stop": ["。", "!", "?"], # 多个停止条件 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() @@ -303,14 +259,9 @@ def test_stop_with_special_characters(): """测试 stop 参数为包含特殊字符的字符串时的行为""" data = { "stream": False, - "messages": [ - { - "role": "user", - "content": "非洲的首都是?" - } - ], + "messages": [{"role": "user", "content": "非洲的首都是?"}], "max_tokens": 50, - "stop": "!@#$%^&*()" # 包含特殊字符 + "stop": "!@#$%^&*()", # 包含特殊字符 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() @@ -323,14 +274,9 @@ def test_stop_with_newlines(): """测试 stop 参数为包含换行符的字符串时的行为""" data = { "stream": False, - "messages": [ - { - "role": "user", - "content": "非洲的首都是?" - } - ], + "messages": [{"role": "user", "content": "非洲的首都是?"}], "max_tokens": 50, - "stop": "\n\n" # 包含换行符 + "stop": "\n\n", # 包含换行符 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() @@ -345,12 +291,12 @@ def test_model_empty(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, "max_tokens": 10, - "model": "" # 空模型 + "model": "", # 空模型 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() @@ -365,17 +311,17 @@ def test_model_invalid(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, "max_tokens": 10, - "model": "non-existent-model" # 不存在的模型 + "model": "non-existent-model", # 不存在的模型 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert resp.get("object") == "chat.completion", "不存在的 model 应触发校验异常" - assert "non-existent-model" in resp.get("model"), "未返回预期的 model 信息" + # assert "non-existent-model" in resp.get("model"), "未返回预期的 model 信息" assert len(resp.get("choices")[0].get("message").get("content")) > 0, "模型名为不存在的 model,未正常生成回复" @@ -385,18 +331,20 @@ def test_model_with_special_characters(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, "max_tokens": 10, - "model": "!@#" # 包含特殊字符 + "model": "!@#", # 包含特殊字符 } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert resp.get("object") == "chat.completion", "不存在的 model 应触发校验异常" - assert "!@#" in resp.get("model"), "未返回预期的 model 信息" - assert len(resp.get("choices")[0].get("message").get("content")) > 0, "模型名为model 参数为非法格式,未正常生成回复" + # assert "!@#" in resp.get("model"), "未返回预期的 model 信息" + assert ( + len(resp.get("choices")[0].get("message").get("content")) > 0 + ), "模型名为model 参数为非法格式,未正常生成回复" def test_max_tokens_negative(): @@ -405,7 +353,7 @@ def test_max_tokens_negative(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, @@ -414,7 +362,7 @@ def test_max_tokens_negative(): payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() assert resp.get("detail").get("object") == "error", "max_tokens < 0 未触发校验异常" - assert 'max_tokens can be defined [1,' in resp.get("detail").get("message"), "未返回预期的 max_tokens 错误信息" + assert "max_tokens can be defined [1," in resp.get("detail").get("message"), "未返回预期的 max_tokens 错误信息" def test_max_tokens_min(): @@ -423,7 +371,7 @@ def test_max_tokens_min(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, @@ -431,8 +379,10 @@ def 
test_max_tokens_min(): } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() - assert resp.get('detail').get("object") == "error", "max_tokens未0时API未拦截住" - assert "reasoning_max_tokens must be between max_tokens and 1" in resp.get('detail').get("message", ""), "未返回预期的 max_tokens 达到异常值0 的 错误信息" + assert resp.get("detail").get("object") == "error", "max_tokens未0时API未拦截住" + assert "reasoning_max_tokens must be between max_tokens and 1" in resp.get("detail").get( + "message", "" + ), "未返回预期的 max_tokens 达到异常值0 的 错误信息" def test_max_tokens_non_integer(): @@ -441,7 +391,7 @@ def test_max_tokens_non_integer(): "messages": [ { "role": "user", - "content": "非洲的首都是?", + "content": "非洲的首都是?", } ], "stream": False, @@ -449,5 +399,6 @@ def test_max_tokens_non_integer(): } payload = build_request_payload(TEMPLATE, data) resp = send_request(URL, payload).json() - assert resp.get('detail')[0].get("msg") == "Input should be a valid integer, got a number with a fractional part", "未返回预期的 max_tokens 为非整数的错误信息" - + assert ( + resp.get("detail")[0].get("msg") == "Input should be a valid integer, got a number with a fractional part" + ), "未返回预期的 max_tokens 为非整数的错误信息" From a988a5536a7d83f533149447d2b81471c1ec6f00 Mon Sep 17 00:00:00 2001 From: LiqinruiG <37392159+LiqinruiG@users.noreply.github.com> Date: Wed, 20 Aug 2025 12:28:14 +0000 Subject: [PATCH 17/17] update test case --- fastdeploy/entrypoints/openai/serving_chat.py | 11 +++++---- .../entrypoints/openai/serving_completion.py | 11 +++++---- .../openai/test_completion_echo.py | 24 ++++++++++++------- .../openai/test_serving_completion.py | 8 +++---- tests/utils/test_custom_chat_template.py | 14 +++++++++-- 5 files changed, 44 insertions(+), 24 deletions(-) diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py index 77ad18dfcc..3583ef9849 100644 --- a/fastdeploy/entrypoints/openai/serving_chat.py +++ b/fastdeploy/entrypoints/openai/serving_chat.py @@ -80,11 +80,12 @@ async def create_chat_completion(self, request: ChatCompletionRequest): api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - is_supported, request.model = self.models.is_supported_model(request.model) - if not is_supported: - err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" - api_server_logger.error(err_msg) - return ErrorResponse(message=err_msg, code=400) + if self.models: + is_supported, request.model = self.models.is_supported_model(request.model) + if not is_supported: + err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default" + api_server_logger.error(err_msg) + return ErrorResponse(message=err_msg, code=400) try: if self.max_waiting_time < 0: diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py index ea3303317d..ddd202fb7b 100644 --- a/fastdeploy/entrypoints/openai/serving_completion.py +++ b/fastdeploy/entrypoints/openai/serving_completion.py @@ -69,11 +69,12 @@ async def create_completion(self, request: CompletionRequest): err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}" api_server_logger.error(err_msg) return ErrorResponse(message=err_msg, code=400) - is_supported, request.model = self.models.is_supported_model(request.model) - if not is_supported: - err_msg = f"Unsupported model: {request.model}, 
-            api_server_logger.error(err_msg)
-            return ErrorResponse(message=err_msg, code=400)
+        if self.models:
+            is_supported, request.model = self.models.is_supported_model(request.model)
+            if not is_supported:
+                err_msg = f"Unsupported model: {request.model}, support {', '.join([x.name for x in self.models.model_paths])} or default"
+                api_server_logger.error(err_msg)
+                return ErrorResponse(message=err_msg, code=400)
         created_time = int(time.time())
         if request.user is not None:
             request_id = f"cmpl-{request.user}-{uuid.uuid4()}"
diff --git a/tests/entrypoints/openai/test_completion_echo.py b/tests/entrypoints/openai/test_completion_echo.py
index 015166582c..565e5ad93e 100644
--- a/tests/entrypoints/openai/test_completion_echo.py
+++ b/tests/entrypoints/openai/test_completion_echo.py
@@ -24,7 +24,9 @@ def setUp(self):

     def test_single_prompt_non_streaming(self):
         """测试单prompt非流式响应"""
-        self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        self.completion_handler = OpenAIServingCompletion(
+            self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30
+        )

         request = CompletionRequest(prompt="test prompt", max_tokens=10, echo=True, logprobs=1)

@@ -54,7 +56,9 @@ async def test_echo_back_prompt_and_streaming(self):
         """测试_echo_back_prompt方法和流式响应的prompt拼接逻辑"""
-        self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        self.completion_handler = OpenAIServingCompletion(
+            self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30
+        )

         request = CompletionRequest(prompt="test prompt", max_tokens=10, stream=True, echo=True)

@@ -76,7 +80,9 @@ def mock_echo_side_effect(req, res, idx):

     def test_multi_prompt_non_streaming(self):
         """测试多prompt非流式响应"""
-        self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        self.completion_handler = OpenAIServingCompletion(
+            self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30
+        )

         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, echo=True)

@@ -108,7 +114,9 @@ def test_multi_prompt_non_streaming(self):
         self.assertEqual(response.choices[1].text, "prompt2 response2")

     async def test_multi_prompt_streaming(self):
-        self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        self.completion_handler = OpenAIServingCompletion(
+            self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30
+        )

         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, stream=True, echo=True)

@@ -140,7 +148,7 @@ async def test_echo_back_prompt_and_streaming1(self):
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

-        instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        instance = OpenAIServingCompletion(self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30)
         await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")

@@ -149,7 +157,7 @@ async def test_1_prompt_is_string_and_send_idx_is_0(self):
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

-        instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        instance = OpenAIServingCompletion(self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30)
         await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")

@@ -158,7 +166,7 @@ async def test_1_send_idx_is_not_0(self):
         res = {"outputs": {"send_idx": 1, "text": "!"}}
         idx = 0

-        instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        instance = OpenAIServingCompletion(self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30)
         await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")

@@ -168,7 +176,7 @@ async def test_1_echo_is_false(self):
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

-        instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
+        instance = OpenAIServingCompletion(self.mock_engine, models=None, pid=123, ips=None, max_waiting_time=30)
         await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")

diff --git a/tests/entrypoints/openai/test_serving_completion.py b/tests/entrypoints/openai/test_serving_completion.py
index 381df17f60..82370ca0b1 100644
--- a/tests/entrypoints/openai/test_serving_completion.py
+++ b/tests/entrypoints/openai/test_serving_completion.py
@@ -16,7 +16,7 @@ def test_calc_finish_reason_tool_calls(self):
         engine_client = Mock()
         engine_client.reasoning_parser = "ernie_x1"
         # 创建一个OpenAIServingCompletion实例
-        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        serving_completion = OpenAIServingCompletion(engine_client, None, "pid", "ips", 360)
         # 创建一个模拟的output,并设置finish_reason为"tool_call"
         output = {"tool_call": "tool_call"}
         # 调用calc_finish_reason方法
@@ -29,7 +29,7 @@ def test_calc_finish_reason_stop(self):
         engine_client = Mock()
         engine_client.reasoning_parser = "ernie_x1"
         # 创建一个OpenAIServingCompletion实例
-        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        serving_completion = OpenAIServingCompletion(engine_client, None, "pid", "ips", 360)
         # 创建一个模拟的output,并设置finish_reason为其他值
         output = {"finish_reason": "other_reason"}
         # 调用calc_finish_reason方法
@@ -41,12 +41,12 @@ def test_calc_finish_reason_length(self):
         # 创建一个模拟的engine_client
         engine_client = Mock()
         # 创建一个OpenAIServingCompletion实例
-        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        serving_completion = OpenAIServingCompletion(engine_client, None, "pid", "ips", 360)
         # 创建一个模拟的output
         output = {}
         # 调用calc_finish_reason方法
@@ -52,7 +52,7 @@ def test_request_output_to_completion_response(self):
         engine_client = Mock()
         # 创建一个OpenAIServingCompletion实例
-        openai_serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        openai_serving_completion = OpenAIServingCompletion(engine_client, None, "pid", "ips", 360)
         final_res_batch: List[RequestOutput] = [
             {
                 "outputs": {
diff --git a/tests/utils/test_custom_chat_template.py b/tests/utils/test_custom_chat_template.py
index 27a66c4e91..b6fe255328 100644
--- a/tests/utils/test_custom_chat_template.py
+++ b/tests/utils/test_custom_chat_template.py
@@ -57,7 +57,12 @@ def test_path_object_file_error(self):
     async def test_serving_chat(self):
         request = ChatCompletionRequest(messages=[{"role": "user", "content": "你好"}])
         self.chat_completion_handler = OpenAIServingChat(
-            self.mock_engine, pid=123, ips=None, max_waiting_time=-1, chat_template=self.input_chat_template
+            self.mock_engine,
+            models=None,
+            pid=123,
+            ips=None,
+            max_waiting_time=-1,
+            chat_template=self.input_chat_template,
         )

         async def mock_chat_completion_full_generator(
@@ -79,7 +84,12 @@ def mock_format_and_add_data(current_req_dict):

     async def test_serving_chat_cus(self):
         request = ChatCompletionRequest(messages=[{"role": "user", "content": "hi"}], chat_template="hello")
         self.chat_completion_handler = OpenAIServingChat(
-            self.mock_engine, pid=123, ips=None, max_waiting_time=10, chat_template=self.input_chat_template
+            self.mock_engine,
+            models=None,
+            pid=123,
+            ips=None,
+            max_waiting_time=10,
+            chat_template=self.input_chat_template,
         )

         async def mock_chat_completion_full_generator(