Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions crates/goose-server/src/routes/config_management.rs
Original file line number Diff line number Diff line change
Expand Up @@ -938,4 +938,25 @@ mod tests {

std::env::remove_var("OPENAI_API_KEY");
}

#[tokio::test]
async fn test_get_provider_models_ollama_not_running() {
    // Exercises the typical CI/testing scenario where no Ollama server is
    // listening: the route should degrade gracefully instead of erroring.
    let state = create_test_state().await;
    let mut headers = HeaderMap::new();
    headers.insert("X-Secret-Key", "test".parse().unwrap());

    let response =
        get_provider_models(State(state), headers, Path("ollama".to_string())).await;

    // The Ollama provider swallows connection failures and reports Ok(None),
    // which the route layer surfaces to clients as an empty model list.
    assert!(
        response.is_ok(),
        "Expected successful response from Ollama provider even when not running"
    );
    let models = response.unwrap().0;

    // No reachable server means no models in the payload.
    assert_eq!(
        models.len(),
        0,
        "Expected empty models list when Ollama is not running"
    );
}
}
45 changes: 45 additions & 0 deletions crates/goose/src/providers/ollama.rs
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,51 @@ impl Provider for OllamaProvider {
fn supports_streaming(&self) -> bool {
self.supports_streaming
}

/// Queries the local Ollama server for its installed models via the
/// `/api/tags` endpoint.
///
/// Returns `Ok(Some(names))` with an alphabetically sorted list when at least
/// one model is installed, and `Ok(None)` when the server is unreachable, the
/// response cannot be parsed, or no models are present. Transport and parse
/// failures are deliberately downgraded to `Ok(None)` (with a warning log)
/// rather than an `Err`, since Ollama frequently isn't running — callers can
/// then fall back to a static model list.
async fn fetch_supported_models(&self) -> Result<Option<Vec<String>>, ProviderError> {
    let response = match self.api_client.response_get("api/tags").await {
        Ok(resp) => resp,
        Err(err) => {
            tracing::warn!("Failed to fetch models from Ollama: {}", err);
            return Ok(None);
        }
    };

    let payload: serde_json::Value = match response.json().await {
        Ok(value) => value,
        Err(err) => {
            tracing::warn!("Failed to parse Ollama models response: {}", err);
            return Ok(None);
        }
    };

    // Expected shape:
    // { "models": [{"name": "model1", "size": ..., "digest": ..., "modified_at": ...}, ...] }
    // A missing or non-array "models" field simply yields an empty list here.
    let mut names: Vec<String> = payload
        .get("models")
        .and_then(serde_json::Value::as_array)
        .into_iter()
        .flatten()
        .filter_map(|entry| entry.get("name").and_then(serde_json::Value::as_str))
        .map(str::to_string)
        .collect();

    if names.is_empty() {
        tracing::info!("No models found in Ollama or unable to parse response");
        Ok(None)
    } else {
        names.sort();
        tracing::info!("Found {} models in Ollama", names.len());
        Ok(Some(names))
    }
}
}

impl OllamaProvider {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ interface LeadWorkerSettingsProps {
}

export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps) {
const { read, upsert, getProviders, remove } = useConfig();
const { read, upsert, getProviders, getProviderModels, remove } = useConfig();
const { currentModel } = useModelAndProvider();
const [leadModel, setLeadModel] = useState<string>('');
const [workerModel, setWorkerModel] = useState<string>('');
Expand Down Expand Up @@ -96,21 +96,48 @@ export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps)
});
});
} else {
// Fallback to provider-based models
// Fetch models with dynamic discovery and static fallback
const providers = await getProviders(false);
const activeProviders = providers.filter((p) => p.is_configured);

activeProviders.forEach(({ metadata, name }) => {
if (metadata.known_models) {
metadata.known_models.forEach((model) => {
options.push({
value: model.name,
label: `${model.name} (${metadata.display_name})`,
provider: name,
// Fetch models for all active providers with dynamic discovery
for (const provider of activeProviders) {
try {
// Try dynamic discovery first
let models = await getProviderModels(provider.name);

// Fallback to static known_models if dynamic returns empty
if ((!models || models.length === 0) && provider.metadata.known_models?.length) {
models = provider.metadata.known_models.map((m) => m.name);
}

// Add models to options
if (models && models.length > 0) {
models.forEach((modelName) => {
options.push({
value: modelName,
label: `${modelName} (${provider.metadata.display_name})`,
provider: provider.name,
});
});
});
}
} catch (error) {
// If dynamic fetch fails, use static fallback
console.warn(
`Failed to fetch models for ${provider.name}, using static fallback:`,
error
);
if (provider.metadata.known_models) {
provider.metadata.known_models.forEach((model) => {
options.push({
value: model.name,
label: `${model.name} (${provider.metadata.display_name})`,
provider: provider.name,
});
});
}
}
});
}
}

setModelOptions(options);
Expand All @@ -122,7 +149,7 @@ export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps)
};

loadConfig();
}, [read, getProviders, currentModel, isOpen]);
}, [read, getProviders, getProviderModels, currentModel, isOpen]);

const handleSave = async () => {
try {
Expand Down