@@ -16,19 +16,19 @@ cd GenAIComps
- Xeon CPU

```bash
- docker build -t opea/wav2lip:latest -f comps/animation/wav2lip/dependency/Dockerfile .
+ docker build -t opea/wav2lip:latest -f comps/animation/src/integration/dependency/Dockerfile .
```

- Gaudi2 HPU

```bash
- docker build -t opea/wav2lip-gaudi:latest -f comps/animation/wav2lip/dependency/Dockerfile.intel_hpu .
+ docker build -t opea/wav2lip-gaudi:latest -f comps/animation/src/integration/dependency/Dockerfile.intel_hpu .
```

### 1.1.2 Animation server image

```bash
- docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/wav2lip/Dockerfile .
+ docker build -t opea/animation:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/animation/src/Dockerfile .
```
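
Optionally, you can confirm that the images were built and tagged as expected by filtering the local Docker image list (a quick sanity check; only the images you actually built will appear):

```bash
# List the locally built OPEA animation images and their tags
docker images | grep -E 'opea/(wav2lip|wav2lip-gaudi|animation)'
```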

## 1.2. Set environment variables
@@ -78,13 +78,13 @@ export FPS=10
- Xeon CPU

```bash
- docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/wav2lip -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
+ docker run --privileged -d --name "wav2lip-service" -p 7860:7860 --ipc=host -w /home/user/comps/animation/src -e PYTHON=/usr/bin/python3.11 -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip:latest
```

- Gaudi2 HPU

```bash
- docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/wav2lip -v $(pwd)/comps/animation/wav2lip/assets:/home/user/comps/animation/wav2lip/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
+ docker run --privileged -d --name "wav2lip-gaudi-service" -p 7860:7860 --runtime=habana --cap-add=sys_nice --ipc=host -w /home/user/comps/animation/src -v $(pwd)/comps/animation/src/assets:/home/user/comps/animation/src/assets -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PYTHON=/usr/bin/python3.10 -e DEVICE=$DEVICE -e INFERENCE_MODE=$INFERENCE_MODE -e CHECKPOINT_PATH=$CHECKPOINT_PATH -e FACE=$FACE -e AUDIO=$AUDIO -e FACESIZE=$FACESIZE -e OUTFILE=$OUTFILE -e GFPGAN_MODEL_VERSION=$GFPGAN_MODEL_VERSION -e UPSCALE_FACTOR=$UPSCALE_FACTOR -e FPS=$FPS -e WAV2LIP_PORT=$WAV2LIP_PORT opea/wav2lip-gaudi:latest
```
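
With either variant, you can verify that the container stays up and watch its startup output using standard Docker commands (shown here for the Xeon container name; substitute wav2lip-gaudi-service on Gaudi2):

```bash
# Check that the container is running
docker ps --filter "name=wav2lip-service"
# Follow the service logs to watch startup output
docker logs -f wav2lip-service
```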

## 2.2 Run Animation Microservice
@@ -101,28 +101,28 @@ Once microservice starts, user can use below script to validate the running micr
```bash
cd GenAIComps
- python3 comps/animation/wav2lip/dependency/check_wav2lip_server.py
+ python3 comps/animation/src/integration/dependency/check_wav2lip_server.py
```

## 3.2 Validate Animation service

```bash
cd GenAIComps
export ip_address=$(hostname -I | awk '{print $1}')
- curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/wav2lip/assets/audio/sample_question.json
+ curl http://${ip_address}:9066/v1/animation -X POST -H "Content-Type: application/json" -d @comps/animation/src/assets/audio/sample_question.json
```

or

```bash
cd GenAIComps
- python3 comps/animation/wav2lip/dependency/check_animation_server.py
+ python3 comps/animation/src/integration/dependency/check_animation_server.py
```

The expected output will be a message similar to the following:

```bash
- {'wav2lip_result': '....../GenAIComps/comps/animation/wav2lip/assets/outputs/result.mp4'}
+ {'wav2lip_result': '....../GenAIComps/comps/animation/src/assets/outputs/result.mp4'}
```

- Please find "comps/animation/wav2lip/assets/outputs/result.mp4" as a reference generated video.
+ Please find "comps/animation/src/assets/outputs/result.mp4" as a reference generated video.
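
If FFmpeg is available on the host, the generated clip can also be inspected from the command line (an optional check; the path below is the new output location introduced by this change):

```bash
# Print the clip's duration, resolution, and codecs
ffprobe -v error -show_entries format=duration:stream=width,height,codec_name -of default=noprint_wrappers=1 comps/animation/src/assets/outputs/result.mp4
```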