
Commit 301afdf

Merge pull request #5 from mzarnecki/PHPRAG-3-deepseek-models-ollama-support
support DeepSeek models Coder-V2 and R1-7B
2 parents d4b85ba + 3931b43 commit 301afdf

7 files changed: +141 −26 lines

README.md

Lines changed: 3 additions & 3 deletions
````diff
@@ -57,12 +57,12 @@ The application demonstrates an interesting use case of distinguishing between t
 - Copy `.env-sample` to `.env` in `app/src`
 - Choose your model in `.env`:
 ```env
-MODEL=<model-option> # Options: GPT-4o, Claude-3.5, Llama3.2, Mixtral, Bielik, Gemini2, DeepSeek
+MODEL=<model-option> # Options: GPT-4o, Claude-3.5, Llama3.2, Mixtral, Bielik, Gemini2, DeepSeek, DeepSeek-R1-7B, DeepSeek-Coder-v2
 ```

 3. **API Configuration**
-#### Local API Options (Mixtral, Llama3.2, Bielik)
+#### Local API Options (Mixtral, Llama3.2, Bielik, DeepSeek-R1-7B, DeepSeek-Coder-v2)
 - No API key required (go directly to point 4.)
 - Requires more CPU/RAM
 - GPU recommended for better performance
````
````diff
@@ -234,7 +234,7 @@ docker-compose up

 ## 🎚 Customize
 - Use different LLMs. \
-You can pick from available LLMs: `GPT-4o, Claude-3.5, Llama3.2, Mixtral, Gemini2` \
+You can pick from available LLMs: `GPT-4o, Claude-3.5, Llama3.2, Mixtral, Bielik, DeepSeek, DeepSeek-R1-7B, DeepSeek-Coder-v2, Gemini2` \
 For using other ones you can just modify model name in LLM client class for model provider, for example `app/src/service/openai/GeneratedTextFromGPTProvider.php:13`
 ```php
 final class GeneratedTextFromGPTProvider extends AbstractGPTAPIClient
````

app/src/.env-sample

Lines changed: 2 additions & 2 deletions
```diff
@@ -1,5 +1,5 @@
-MODEL=Llama3.2 #Available options: GPT-4o, Claude-3.5, Llama3.2, Mixtral, Bielik, Gemini2, DeepSeek
-#fill one API keys below only when using other model than Llama3.2 and Mixtral
+MODEL=Llama3.2 #Available options: GPT-4o, Claude-3.5, Llama3.2, Mixtral, Bielik, Gemini2, DeepSeek, DeepSeek-R1-7B, DeepSeek-Coder-v2
+#fill in one of the API keys below only when using a model other than Llama3.2, Mixtral, DeepSeek-R1-7B, or DeepSeek-Coder-v2 (these are pulled locally and served with Ollama)
 #OpenAI API
 OPENAI_API_KEY=your-open-ai-api-key
 #Google Gemini API
```
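
With a local DeepSeek model, the resulting `.env` can stay minimal; per the comment above, no API key is needed because the model is pulled and served by Ollama (the value below is just one of the listed options):

```env
MODEL=DeepSeek-Coder-v2
```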

app/src/service/ServicesForSpecificModelFactory.php

Lines changed: 6 additions & 0 deletions
```diff
@@ -6,6 +6,8 @@
 use service\gemini\GeminiTextEncoder;
 use service\gemini\GeneratedTextFromGeminiProvider;
 use service\ollama\GeneratedTextFromLocalBielikProvider;
+use service\ollama\GeneratedTextFromLocalDeepSeekCoderV2Provider;
+use service\ollama\GeneratedTextFromLocalDeepSeekR17BProvider;
 use service\ollama\GeneratedTextFromLocalLlama3Provider;
 use service\ollama\GeneratedTextFromMixtralProvider;
 use service\ollama\MxbaiTextEncoder;
@@ -24,6 +26,8 @@ public function getGeneratedTextProvider(string $model): GeneratedTextProviderInterface
         'llama3.2' => GeneratedTextFromLocalLlama3Provider::class,
         'mixtral' => GeneratedTextFromMixtralProvider::class,
         'bielik' => GeneratedTextFromLocalBielikProvider::class,
+        'deepseek-r1-7b' => GeneratedTextFromLocalDeepSeekR17BProvider::class,
+        'deepseek-coder-v2' => GeneratedTextFromLocalDeepSeekCoderV2Provider::class,
         'gemini2' => GeneratedTextFromGeminiProvider::class,
     ];

@@ -43,6 +47,8 @@ public function getEmbeddingsService(string $model): TextEncoderInterface
         'llama3.2' => MxbaiTextEncoder::class,
         'mixtral' => MxbaiTextEncoder::class,
         'bielik' => MxbaiTextEncoder::class,
+        'deepseek-r1-7b' => MxbaiTextEncoder::class,
+        'deepseek-coder-v2' => MxbaiTextEncoder::class,
         'gemini2' => GeminiTextEncoder::class,
     ];
```
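
The map keys are lowercase while the `MODEL` values in `.env` are mixed-case (e.g. `DeepSeek-R1-7B`), so the factory presumably normalizes the name before lookup. Below is a minimal sketch of how such a map can be consumed; the lowercase normalization and the no-argument constructor are assumptions for illustration, only the two map entries come from the diff above:

```php
<?php
// Minimal usage sketch (not part of the commit): resolving a provider class
// for a MODEL value such as "DeepSeek-R1-7B". The strtolower() lookup and
// the no-argument constructor are assumptions made for illustration only.
$providers = [
    'deepseek-r1-7b'    => \service\ollama\GeneratedTextFromLocalDeepSeekR17BProvider::class,
    'deepseek-coder-v2' => \service\ollama\GeneratedTextFromLocalDeepSeekCoderV2Provider::class,
];

$model = strtolower(getenv('MODEL') ?: '');   // e.g. "DeepSeek-R1-7B" -> "deepseek-r1-7b"

if (!isset($providers[$model])) {
    throw new InvalidArgumentException("Unsupported model: $model");
}

$providerClass = $providers[$model];
$provider = new $providerClass();             // a GeneratedTextProviderInterface instance
```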

app/src/service/ollama/GeneratedTextFromLocalDeepSeekCoderV2Provider.php

Lines changed: 36 additions & 0 deletions (new file)

```php
<?php

namespace service\ollama;

use League\Pipeline\StageInterface;
use service\GeneratedTextProviderInterface;
use service\pipeline\Payload;

class GeneratedTextFromLocalDeepSeekCoderV2Provider extends AbstractOllamaAPIClient
    implements StageInterface, GeneratedTextProviderInterface
{
    /**
     * @param Payload $payload
     * @return string
     */
    public function __invoke($payload)
    {
        return $this->generateText($payload->getPrompt(), $payload->getRagPrompt());
    }

    protected function getEndpoint(): string
    {
        return '/api/generate';
    }

    protected function getBodyParams(string $input): array
    {
        return [
            "model" => "deepseek-coder-v2",
            "prompt" => $input
        ];
    }
}
```
app/src/service/ollama/GeneratedTextFromLocalDeepSeekR17BProvider.php

Lines changed: 36 additions & 0 deletions (new file)

```php
<?php

namespace service\ollama;

use League\Pipeline\StageInterface;
use service\GeneratedTextProviderInterface;
use service\pipeline\Payload;

class GeneratedTextFromLocalDeepSeekR17BProvider extends AbstractOllamaAPIClient
    implements StageInterface, GeneratedTextProviderInterface
{
    /**
     * @param Payload $payload
     * @return string
     */
    public function __invoke($payload)
    {
        return $this->generateText($payload->getPrompt(), $payload->getRagPrompt());
    }

    protected function getEndpoint(): string
    {
        return '/api/generate';
    }

    protected function getBodyParams(string $input): array
    {
        return [
            "model" => "deepseek-r1:7b",
            "prompt" => $input
        ];
    }
}
```
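
Both new providers point `getEndpoint()` at Ollama's `/api/generate` route and send the `model`/`prompt` body built in `getBodyParams()`; note the two tag styles, `deepseek-coder-v2` versus the size-tagged `deepseek-r1:7b`, matching the `ollama pull` commands in `entrypoint.sh` below. As a rough sketch of the request `AbstractOllamaAPIClient` presumably ends up issuing — the host name (the `ollama-container` service from `docker-compose.yaml` below), Ollama's default port `11434`, and the non-streaming flag are assumptions; only the endpoint and the `model`/`prompt` fields come from the code above:

```php
<?php
// Hypothetical sketch of the HTTP call behind generateText(); the host, port
// and "stream" flag are assumptions, not taken from AbstractOllamaAPIClient.
$body = json_encode([
    'model'  => 'deepseek-r1:7b',                 // tag from getBodyParams()
    'prompt' => 'Explain RAG in one sentence.',
    'stream' => false,                            // ask Ollama for a single JSON reply
]);

$ch = curl_init('http://ollama-container:11434/api/generate');
curl_setopt_array($ch, [
    CURLOPT_POST           => true,
    CURLOPT_POSTFIELDS     => $body,
    CURLOPT_HTTPHEADER     => ['Content-Type: application/json'],
    CURLOPT_RETURNTRANSFER => true,
]);

$response = json_decode(curl_exec($ch), true);
curl_close($ch);

echo $response['response'];                       // the generated text
```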

docker-compose.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -52,6 +52,7 @@ services:
       - .:/code
       - ./ollama/ollama:/root/.ollama
       - ./entrypoint.sh:/entrypoint.sh
+      - ./app/src/.env:/.env
     container_name: ollama-container
     tty: true
     restart: always
```

entrypoint.sh

Lines changed: 57 additions & 21 deletions
```diff
@@ -1,28 +1,64 @@
 #!/bin/bash

-# Start Ollama in the background.
-/bin/ollama serve &
-# Record Process ID.
-pid=$!
+# Source the .env file to get the MODEL variable
+# Look for the .env file in the working directory
+if [ -f ".env" ]; then
+    echo "Found .env in current directory"
+    source ".env"
+else
+    echo "Error: .env file not found!"
+    echo "Please mount the .env file into the container or specify the correct path."
+    exit 1
+fi

-# Pause for Ollama to start.
-sleep 5
+# Check if MODEL is one of the locally hosted models
+if [[ "$MODEL" == "Llama3.2" || "$MODEL" == "Mixtral" || "$MODEL" == "Bielik" || "$MODEL" == "DeepSeek-R1-7B" || "$MODEL" == "DeepSeek-Coder-v2" ]]; then
+    # Start Ollama in the background
+    /bin/ollama serve &
+    # Record Process ID
+    pid=$!

-echo "🔴 Retrieve LLAMA3 model..."
-ollama pull llama3.2
-echo "🟢 Done!"
+    # Pause for Ollama to start
+    sleep 5

-echo "🔴 Retrieve Mixtral model..."
-ollama pull mistral
-echo "🟢 Done!"
+    # Pull only the selected model
+    case "$MODEL" in
+        "Llama3.2")
+            echo "🔴 Retrieving LLAMA3 model..."
+            ollama pull llama3.2
+            echo "🟢 Done!"
+            ;;
+        "Mixtral")
+            echo "🔴 Retrieving Mixtral model..."
+            ollama pull mistral
+            echo "🟢 Done!"
+            ;;
+        "Bielik")
+            echo "🔴 Retrieving Bielik model..."
+            ollama pull mwiewior/bielik
+            echo "🟢 Done!"
+            ;;
+        "DeepSeek-R1-7B")
+            echo "🔴 Retrieving DeepSeek-R1:7B model..."
+            ollama pull deepseek-r1:7b
+            echo "🟢 Done!"
+            ;;
+        "DeepSeek-Coder-v2")
+            echo "🔴 Retrieving DeepSeek-Coder-v2 model..."
+            ollama pull deepseek-coder-v2
+            echo "🟢 Done!"
+            ;;
+    esac

-echo "🔴 Retrieve mxbai embedding model..."
-ollama pull mxbai-embed-large
-echo "🟢 Done!"
+    # For embedding capabilities, always pull the mxbai model
+    echo "🔴 Retrieving mxbai embedding model..."
+    ollama pull mxbai-embed-large
+    echo "🟢 Done!"

-echo "🔴 Retrieve Bielik model..."
-ollama pull mwiewior/bielik
-echo "🟢 Done!"
-
-# Wait for Ollama process to finish.
-wait $pid
+    # Wait for Ollama process to finish
+    wait $pid
+else
+    echo "Using cloud-based model: $MODEL"
+    echo "No need to pull local models via Ollama."
+    exit 0
+fi
```
