Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion test/srt/run_suite.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ class TestFile:
TestFile("test_chunked_prefill.py", 336),
TestFile("test_eagle_infer.py", 500),
TestFile("test_ebnf_constrained.py"),
TestFile("test_fa3.py", 30),
TestFile("test_fp8_kernel.py", 8),
TestFile("test_embedding_openai_server.py", 36),
TestFile("test_hidden_states.py", 55),
Expand Down Expand Up @@ -91,7 +92,7 @@ class TestFile:
TestFile("test_verl_engine.py", 100),
],
"per-commit-8-gpu": [
TestFile("test_fa3.py", 30),
TestFile("test_local_attn.py", 10),
],
"nightly": [
TestFile("test_nightly_gsm8k_eval.py"),
Expand Down
17 changes: 0 additions & 17 deletions test/srt/test_fa3.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_TEST,
DEFAULT_MODEL_NAME_FOR_TEST_EAGLE3,
DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION,
DEFAULT_MODEL_NAME_FOR_TEST_MLA,
DEFAULT_MODEL_NAME_FOR_TEST_MLA_NEXTN,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
Expand Down Expand Up @@ -127,22 +126,6 @@ def get_server_args(cls):
return DEFAULT_SERVER_ARGS


class TestFlashAttention3LocalAttn(BaseFlashAttentionTest):
"""Test FlashAttention3 with Model with local attention, e.g. Llama 4."""

accuracy_threshold = 0.70
model = DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION

@classmethod
def get_server_args(cls):
cloned_args = DEFAULT_SERVER_ARGS.copy()
# remove --enable-torch-compile from cloned_args since llama4 does not support it for now
cloned_args.remove("--enable-torch-compile")
# we cannot use scout's 10m context due to this bug: https://github.com/sgl-project/sglang/issues/5755
cloned_args.extend(["--tp", "4", "--context-length", "1000000"])
return cloned_args


class TestFlashAttention3SpeculativeDecode(BaseFlashAttentionTest):
"""Test FlashAttention3 with speculative decode enabled with Llama 3.1 8B and its eagle3 model"""

Expand Down
72 changes: 72 additions & 0 deletions test/srt/test_local_attn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import os
import unittest
from types import SimpleNamespace

import requests

from sglang.srt.utils import get_device_sm, kill_process_tree
from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
from sglang.test.test_utils import (
DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION,
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
DEFAULT_URL_FOR_TEST,
popen_launch_server,
)


@unittest.skipIf(get_device_sm() < 90, "Test requires CUDA SM 90 or higher")
class TestFlashAttention3LocalAttn(unittest.TestCase):
    """GSM8K accuracy test for the FA3 backend on a local-attention model.

    Launches a real sglang server for a model that uses local (chunked)
    attention layers (e.g. Llama 4), runs a few-shot GSM8K eval against
    it, and asserts accuracy stays above ``accuracy_threshold``.
    """

    model = DEFAULT_MODEL_NAME_FOR_TEST_LOCAL_ATTENTION
    base_url = DEFAULT_URL_FOR_TEST
    # Minimum acceptable GSM8K accuracy for this model/backend combo.
    accuracy_threshold = 0.90

    @classmethod
    def get_server_args(cls):
        """Return the server CLI args: FA3 backend, tp=4, capped context.

        The context length is capped at 1M instead of the model's native
        10M because of https://github.com/sgl-project/sglang/issues/5755.
        """
        return [
            "--trust-remote-code",
            "--cuda-graph-max-bs",
            "2",
            "--attention-backend",
            "fa3",
            "--tp",
            "4",
            "--context-length",
            "1000000",
        ]

    @classmethod
    def setUpClass(cls):
        """Launch the server once for the class; torn down in tearDownClass."""
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=cls.get_server_args(),
            # Inherit the current environment so CUDA/runtime settings
            # reach the server process.
            env=os.environ,
        )

    @classmethod
    def tearDownClass(cls):
        # Kill the whole process tree, not just the launcher, so worker
        # subprocesses don't leak between test files.
        kill_process_tree(cls.process.pid)

    def test_gsm8k(self):
        """Run few-shot GSM8K against the live server and check accuracy."""
        args = SimpleNamespace(
            num_shots=4,
            num_questions=100,
            max_new_tokens=512,
            parallel=128,
            host="http://127.0.0.1",
            # Derive the port from base_url so the two always agree.
            port=int(self.base_url.split(":")[-1]),
            data_path=None,
        )
        metrics = run_eval_few_shot_gsm8k(args)
        print(f"{metrics=}")

        self.assertGreater(metrics["accuracy"], self.accuracy_threshold)


if __name__ == "__main__":
unittest.main()
Loading