Skip to content

Commit 6fa6f38

Browse files
minleminzui, shuaills, zhaochenyang20, merrymercy, zhyncs
authored
Feat: add support for thinking mode via chat_template_kwargs.enable_thinking (#5551)
Co-authored-by: shuaills <shishuaiuoe@gmail.com> Co-authored-by: Chayenne <zhaochen20@outlook.com> Co-authored-by: Lianmin Zheng <lianminzheng@gmail.com> Co-authored-by: Yineng Zhang <me@zhyncs.com>
1 parent 693723d commit 6fa6f38

File tree

3 files changed

+55
-4
lines changed

3 files changed

+55
-4
lines changed

python/sglang/srt/openai_api/adapter.py

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1001,6 +1001,11 @@ def v1_chat_generate_request(
10011001
tokenize=True,
10021002
add_generation_prompt=True,
10031003
tools=tools,
1004+
**(
1005+
request.chat_template_kwargs
1006+
if request.chat_template_kwargs
1007+
else {}
1008+
),
10041009
)
10051010
except:
10061011
# This except branch will be triggered when the chosen model
@@ -1012,6 +1017,11 @@ def v1_chat_generate_request(
10121017
tokenize=True,
10131018
add_generation_prompt=True,
10141019
tools=tools,
1020+
**(
1021+
request.chat_template_kwargs
1022+
if request.chat_template_kwargs
1023+
else {}
1024+
),
10151025
)
10161026

10171027
if assistant_prefix:
@@ -1245,16 +1255,34 @@ def v1_chat_generate_response(
12451255
tool_calls = None
12461256
text = ret_item["text"]
12471257

1258+
enable_thinking = True
12481259
if isinstance(request, list):
12491260
tool_choice = request[idx].tool_choice
12501261
tools = request[idx].tools
12511262
separate_reasoning = request[idx].separate_reasoning
1263+
1264+
if (
1265+
request[idx].chat_template_kwargs
1266+
and request[idx].chat_template_kwargs.get("enable_thinking") is not None
1267+
):
1268+
enable_thinking = request[idx].chat_template_kwargs.get(
1269+
"enable_thinking", True
1270+
)
12521271
else:
12531272
tool_choice = request.tool_choice
12541273
tools = request.tools
12551274
separate_reasoning = request.separate_reasoning
12561275

1257-
if reasoning_parser and separate_reasoning:
1276+
if (
1277+
request.chat_template_kwargs
1278+
and request.chat_template_kwargs.get("enable_thinking") is not None
1279+
):
1280+
enable_thinking = request.chat_template_kwargs.get(
1281+
"enable_thinking", True
1282+
)
1283+
1284+
reasoning_text = None
1285+
if reasoning_parser and separate_reasoning and enable_thinking:
12581286
try:
12591287
parser = ReasoningParser(
12601288
model_type=reasoning_parser, stream_reasoning=False
@@ -1266,8 +1294,6 @@ def v1_chat_generate_response(
12661294
HTTPStatus.BAD_REQUEST,
12671295
"Failed to parse reasoning related info to json format!",
12681296
)
1269-
else:
1270-
reasoning_text = None
12711297

12721298
if tool_choice != "none" and tools:
12731299
parser = FunctionCallParser(tools, tool_call_parser)

python/sglang/srt/openai_api/protocol.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -361,6 +361,7 @@ def set_tool_choice_default(cls, values):
361361
session_params: Optional[Dict] = None
362362
separate_reasoning: bool = True
363363
stream_reasoning: bool = True
364+
chat_template_kwargs: Optional[Dict] = None
364365

365366
# For PD disaggregation
366367
bootstrap_host: Optional[str] = None

python/sglang/srt/reasoning_parser.py

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,29 @@ def __init__(self, stream_reasoning: bool = True):
117117
# https://github.com/sgl-project/sglang/pull/3202#discussion_r1950153599
118118

119119

120+
class Qwen3Detector(BaseReasoningFormatDetector):
    """
    Reasoning-format detector for the Qwen3 model family.

    Expected output format: (<think>)*(.*)</think>
    Everything preceding the closing ``</think>`` tag is returned as
    ``reasoning_text``; whatever follows it is returned as ``normal_text``.

    Args:
        stream_reasoning (bool): When True, reasoning content is streamed
            out as it arrives; when False it is accumulated until the
            closing tag is seen.
    """

    def __init__(self, stream_reasoning: bool = True):
        # Qwen3 output is treated as reasoning until the `</think>` token
        # appears, hence force_reasoning=True even if no opening tag is seen.
        think_start, think_end = "<think>", "</think>"
        super().__init__(
            think_start,
            think_end,
            force_reasoning=True,
            stream_reasoning=stream_reasoning,
        )
141+
142+
120143
class ReasoningParser:
121144
"""
122145
Parser that handles both streaming and non-streaming scenarios for extracting
@@ -129,7 +152,8 @@ class ReasoningParser:
129152
"""
130153

131154
DetectorMap: Dict[str, BaseReasoningFormatDetector] = {
132-
"deepseek-r1": DeepSeekR1Detector
155+
"deepseek-r1": DeepSeekR1Detector,
156+
"qwen3": Qwen3Detector,
133157
}
134158

135159
def __init__(self, model_type: str = None, stream_reasoning: bool = True):

0 commit comments

Comments (0)