Skip to content

Commit bece9f4

Browse files
Fix tool_render with direct import from pydantic (opea-project#750)
* fix tool renderer bug Signed-off-by: minmin-intel <minmin.hou@intel.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * clean up test script Signed-off-by: minmin-intel <minmin.hou@intel.com> * switch to pydantic direct import Signed-off-by: minmin-intel <minmin.hou@intel.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: minmin-intel <minmin.hou@intel.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent f6f620a commit bece9f4

File tree

4 files changed

+6
-12
lines changed

4 files changed

+6
-12
lines changed

comps/agent/langchain/src/strategy/planexec/planner.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,12 @@
1313
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
1414
from langchain_core.outputs import Generation
1515
from langchain_core.prompts import PromptTemplate
16-
from langchain_core.pydantic_v1 import BaseModel, Field
1716
from langchain_core.utils.json import parse_partial_json
1817
from langchain_huggingface import ChatHuggingFace
1918
from langgraph.checkpoint.memory import MemorySaver
2019
from langgraph.graph import END, START, StateGraph
2120
from langgraph.graph.message import add_messages
21+
from pydantic import BaseModel, Field
2222

2323
from ...global_var import threads_global_kv
2424
from ...utils import has_multi_tool_inputs, tool_renderer

comps/agent/langchain/src/strategy/ragagent/planner.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,13 @@
88
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
99
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
1010
from langchain_core.prompts import PromptTemplate
11-
from langchain_core.pydantic_v1 import BaseModel, Field
1211
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
1312
from langchain_openai import ChatOpenAI
1413
from langgraph.checkpoint.memory import MemorySaver
1514
from langgraph.graph import END, START, StateGraph
1615
from langgraph.graph.message import add_messages
1716
from langgraph.prebuilt import ToolNode, tools_condition
17+
from pydantic import BaseModel, Field
1818

1919
from ..base_agent import BaseAgent
2020
from .prompt import DOC_GRADER_PROMPT, RAG_PROMPT, QueryWriterLlamaPrompt
@@ -366,7 +366,7 @@ def __call__(self, state) -> Literal["generate", "rewrite"]:
366366
print("@@@@ Score: ", score)
367367

368368
# if score.startswith("yes"):
369-
if "yes" in score:
369+
if "yes" in score.lower():
370370
print("---DECISION: DOCS RELEVANT---")
371371
return {"doc_score": "generate"}
372372

comps/agent/langchain/src/tools.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,9 @@
77
import sys
88

99
import yaml
10-
11-
# from pydantic import create_model, Field
12-
from langchain.pydantic_v1 import BaseModel, Field, create_model
1310
from langchain.tools import BaseTool, StructuredTool
1411
from langchain_community.agent_toolkits.load_tools import load_tools
12+
from pydantic import BaseModel, Field, create_model
1513

1614

1715
def generate_request_function(url):

tests/agent/test_agent_langchain_on_intel_hpu.sh

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ WORKPATH=$(dirname "$PWD")
88
LOG_PATH="$WORKPATH/tests"
99
ip_address=$(hostname -I | awk '{print $1}')
1010
tgi_port=8085
11-
tgi_volume=$WORKPATH/data #/data2/cache/hub/Meta-Llama-3.1-70B-Instruct #$HF_CACHE_DIR #
11+
tgi_volume=$WORKPATH/data
1212

1313
export agent_image="opea/agent-langchain:comps"
1414
export agent_container_name="test-comps-agent-endpoint"
@@ -39,10 +39,9 @@ function build_docker_images() {
3939
}
4040

4141
function start_tgi_service() {
42-
# redis endpoint
4342
echo "token is ${HF_TOKEN}"
4443

45-
#single card
44+
#multi cards
4645
echo "start tgi gaudi service"
4746
docker run -d --runtime=habana --name "test-comps-tgi-gaudi-service" -p $tgi_port:80 -v $tgi_volume:/data -e HF_TOKEN=$HF_TOKEN -e HABANA_VISIBLE_DEVICES=0,1,2,3 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e http_proxy=$http_proxy -e https_proxy=$https_proxy --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 4096 --max-total-tokens 8192 --sharded true --num-shard 4
4847
sleep 5s
@@ -62,7 +61,6 @@ function start_tgi_service() {
6261

6362
function start_react_langchain_agent_service() {
6463
echo "Starting react_langchain agent microservice"
65-
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e port=9095 -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
6664
docker compose -f $WORKPATH/tests/agent/react_langchain.yaml up -d
6765
sleep 5s
6866
docker logs test-comps-agent-endpoint
@@ -72,7 +70,6 @@ function start_react_langchain_agent_service() {
7270

7371
function start_react_langgraph_agent_service() {
7472
echo "Starting react_langgraph agent microservice"
75-
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
7673
docker compose -f $WORKPATH/tests/agent/reactllama.yaml up -d
7774
sleep 5s
7875
docker logs test-comps-agent-endpoint
@@ -90,7 +87,6 @@ function start_react_langgraph_agent_service_openai() {
9087

9188
function start_ragagent_agent_service() {
9289
echo "Starting rag agent microservice"
93-
# docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9095:9095 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent_llama -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:comps
9490
docker compose -f $WORKPATH/tests/agent/ragagent.yaml up -d
9591
sleep 5s
9692
docker logs test-comps-agent-endpoint

0 commit comments

Comments (0)