diff --git a/.github/workflows/docker/compose/agent-compose.yaml b/.github/workflows/docker/compose/agent-compose.yaml index fcff2be278..88321474d1 100644 --- a/.github/workflows/docker/compose/agent-compose.yaml +++ b/.github/workflows/docker/compose/agent-compose.yaml @@ -3,7 +3,7 @@ # this file should be run in the root of the repo services: - agent-langchain: + agent: build: - dockerfile: comps/agent/langchain/Dockerfile - image: ${REGISTRY:-opea}/agent-langchain:${TAG:-latest} + dockerfile: comps/agent/src/Dockerfile + image: ${REGISTRY:-opea}/agent:${TAG:-latest} diff --git a/.github/workflows/docker/compose/embeddings-compose.yaml b/.github/workflows/docker/compose/embeddings-compose.yaml index 5fc274c241..f27fed6129 100644 --- a/.github/workflows/docker/compose/embeddings-compose.yaml +++ b/.github/workflows/docker/compose/embeddings-compose.yaml @@ -3,10 +3,10 @@ # this file should be run in the root of the repo services: - embedding-tei: + embedding: build: dockerfile: comps/embeddings/src/Dockerfile - image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest} + image: ${REGISTRY:-opea}/embedding:${TAG:-latest} embedding-multimodal-clip: build: dockerfile: comps/embeddings/src/integrations/dependency/clip/Dockerfile @@ -15,15 +15,7 @@ services: build: dockerfile: comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower:${TAG:-latest} - embedding-multimodal: - build: - dockerfile: comps/embeddings/src/Dockerfile - image: ${REGISTRY:-opea}/embedding-multimodal:${TAG:-latest} embedding-multimodal-bridgetower-gaudi: build: dockerfile: comps/embeddings/src/integrations/dependency/bridgetower/Dockerfile.intel_hpu image: ${REGISTRY:-opea}/embedding-multimodal-bridgetower-gaudi:${TAG:-latest} - embedding-predictionguard: - build: - dockerfile: comps/embeddings/src/Dockerfile - image: ${REGISTRY:-opea}/embedding-predictionguard:${TAG:-latest} diff --git a/.github/workflows/docker/compose/llms-compose.yaml b/.github/workflows/docker/compose/llms-compose.yaml index 2d42e6f46d..a86c035af7 100644 --- a/.github/workflows/docker/compose/llms-compose.yaml +++ b/.github/workflows/docker/compose/llms-compose.yaml @@ -3,10 +3,10 @@ # this file should be run in the root of the repo services: - llm-tgi: + llm-textgen: build: dockerfile: comps/llms/src/text-generation/Dockerfile - image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest} + image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} llm-ollama: build: dockerfile: comps/llms/text-generation/ollama/langchain/Dockerfile @@ -19,10 +19,6 @@ services: build: dockerfile: comps/llms/faq-generation/tgi/langchain/Dockerfile image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest} - llm-vllm: - build: - dockerfile: comps/llms/src/text-generation/Dockerfile - image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest} llm-native: build: dockerfile: comps/llms/text-generation/native/langchain/Dockerfile diff --git a/.github/workflows/docker/compose/reranks-compose.yaml b/.github/workflows/docker/compose/reranks-compose.yaml index ff71162bff..f1a757247f 100644 --- a/.github/workflows/docker/compose/reranks-compose.yaml +++ b/.github/workflows/docker/compose/reranks-compose.yaml @@ -3,15 +3,7 @@ # this file should be run in the root of the repo services: - reranking-tei: + reranking: build: dockerfile: comps/reranks/src/Dockerfile - image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest} - reranking-videoqna: - build: - dockerfile: comps/reranks/src/Dockerfile # TODO. 
need to update - image: ${REGISTRY:-opea}/reranking-videoqna:${TAG:-latest} - reranking-fastrag: - build: - dockerfile: comps/reranks/src/Dockerfile # TODO. need to update - image: ${REGISTRY:-opea}/reranking-fastrag:${TAG:-latest} + image: ${REGISTRY:-opea}/reranking:${TAG:-latest} diff --git a/.github/workflows/scripts/check_duplicated_image.py b/.github/workflows/scripts/check_duplicated_image.py index cd5f0cddfb..03790cce9a 100644 --- a/.github/workflows/scripts/check_duplicated_image.py +++ b/.github/workflows/scripts/check_duplicated_image.py @@ -9,6 +9,8 @@ import yaml images = {} +dockerfiles = {} +errors = [] def check_docker_compose_build_definition(file_path): @@ -30,18 +32,26 @@ def check_docker_compose_build_definition(file_path): if not os.path.exists(dockerfile): # dockerfile not exists in the current repo context, assume it's in 3rd party context dockerfile = os.path.normpath(os.path.join(context, build.get("dockerfile", ""))) - item = {"file_path": file_path, "service": service, "dockerfile": dockerfile} + item = {"file_path": file_path, "service": service, "dockerfile": dockerfile, "image": image} if image in images and dockerfile != images[image]["dockerfile"]: - print("ERROR: !!! Found Conflicts !!!") - print(f"Image: {image}, Dockerfile: {dockerfile}, defined in Service: {service}, File: {file_path}") - print( + errors.append( + f"ERROR: !!! Found Conflicts !!!\n" + f"Image: {image}, Dockerfile: {dockerfile}, defined in Service: {service}, File: {file_path}\n" f"Image: {image}, Dockerfile: {images[image]['dockerfile']}, defined in Service: {images[image]['service']}, File: {images[image]['file_path']}" ) - sys.exit(1) else: # print(f"Add Image: {image} Dockerfile: {dockerfile}") images[image] = item + if dockerfile in dockerfiles and image != dockerfiles[dockerfile]["image"]: + errors.append( + f"WARNING: Different images using the same Dockerfile\n" + f"Dockerfile: {dockerfile}, Image: {image}, defined in Service: {service}, File: {file_path}\n" + f"Dockerfile: {dockerfile}, Image: {dockerfiles[dockerfile]['image']}, defined in Service: {dockerfiles[dockerfile]['service']}, File: {dockerfiles[dockerfile]['file_path']}" ) + else: + dockerfiles[dockerfile] = item + def parse_arg(): parser = argparse.ArgumentParser( @@ -55,6 +65,10 @@ def main(): args = parse_arg() for file_path in args.files: check_docker_compose_build_definition(file_path) + if errors: + for error in errors: + print(error) + sys.exit(1) print("SUCCESS: No Conflicts Found.") return 0 diff --git a/comps/agent/langchain/Dockerfile b/comps/agent/src/Dockerfile similarity index 68% rename from comps/agent/langchain/Dockerfile rename to comps/agent/src/Dockerfile index 4b0b951c8b..be889676c8 100644 --- a/comps/agent/langchain/Dockerfile +++ b/comps/agent/src/Dockerfile @@ -21,19 +21,19 @@ COPY comps /home/user/comps RUN pip install --no-cache-dir --upgrade pip setuptools && \ if [ ${ARCH} = "cpu" ]; then \ - pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r /home/user/comps/agent/langchain/requirements.txt; \ + pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r /home/user/comps/agent/src/requirements.txt; \ else \ - pip install --no-cache-dir -r /home/user/comps/agent/langchain/requirements.txt; \ + pip install --no-cache-dir -r /home/user/comps/agent/src/requirements.txt; \ fi ENV PYTHONPATH=$PYTHONPATH:/home/user USER root -RUN mkdir -p /home/user/comps/agent/langchain/status && chown -R user /home/user/comps/agent/langchain/status +RUN 
mkdir -p /home/user/comps/agent/src/status && chown -R user /home/user/comps/agent/src/status USER user -WORKDIR /home/user/comps/agent/langchain/ +WORKDIR /home/user/comps/agent/src/ ENTRYPOINT ["python", "agent.py"] diff --git a/comps/agent/langchain/README.md b/comps/agent/src/README.md similarity index 95% rename from comps/agent/langchain/README.md rename to comps/agent/src/README.md index e6ad4e6f88..ce680756a3 100644 --- a/comps/agent/langchain/README.md +++ b/comps/agent/src/README.md @@ -11,7 +11,7 @@ We currently support the following types of agents. Please refer to the example 1. ReAct: use `react_langchain` or `react_langgraph` or `react_llama` as strategy. First introduced in this seminal [paper](https://arxiv.org/abs/2210.03629). The ReAct agent engages in "reason-act-observe" cycles to solve problems. Please refer to this [doc](https://python.langchain.com/v0.2/docs/how_to/migrate_agent/) to understand the differences between the langchain and langgraph versions of react agents. See table below to understand the validated LLMs for each react strategy. 2. RAG agent: use `rag_agent` or `rag_agent_llama` strategy. This agent is specifically designed for improving RAG performance. It has the capability to rephrase query, check relevancy of retrieved context, and iterate if context is not relevant. See table below to understand the validated LLMs for each rag agent strategy. 3. Plan and execute: `plan_execute` strategy. This type of agent first makes a step-by-step plan given a user request, and then execute the plan sequentially (or in parallel, to be implemented in future). If the execution results can solve the problem, then the agent will output an answer; otherwise, it will replan and execute again. -4. SQL agent: use `sql_agent_llama` or `sql_agent` strategy. This agent is specifically designed and optimized for answering questions aabout data in SQL databases. Users need to specify `db_name` and `db_path` for the agent to access the SQL database. For more technical details read descriptions [here](src/strategy/sqlagent/README.md). +4. SQL agent: use `sql_agent_llama` or `sql_agent` strategy. This agent is specifically designed and optimized for answering questions about data in SQL databases. Users need to specify `db_name` and `db_path` for the agent to access the SQL database. For more technical details read descriptions [here](integrations/strategy/sqlagent/README.md). **Note**: @@ -60,7 +60,7 @@ Examples of how to register tools can be found in [Section 4](#-4-provide-your-o ```bash cd GenAIComps/ # back to GenAIComps/ folder -docker build -t opea/agent-langchain:latest -f comps/agent/langchain/Dockerfile . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy +docker build -t opea/agent:latest -f comps/agent/src/Dockerfile . 
--build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy ``` #### 2.2.1 Start Agent microservices with TGI @@ -78,7 +78,7 @@ docker run -d --runtime=habana --name "comps-tgi-gaudi-service" -p 8080:80 -v ./ docker logs comps-tgi-gaudi-service # Agent: react_llama strategy -docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest +docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/src/tools:/home/user/comps/agent/src/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/src/tools/custom_tools.yaml opea/agent:latest # check status docker logs comps-langchain-agent-endpoint @@ -105,7 +105,7 @@ docker run -d --runtime=habana --rm --name "comps-vllm-gaudi-service" -p 8080:80 docker logs comps-vllm-gaudi-service # Agent -docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=vllm -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest +docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v $WORKPATH/comps/agent/src/tools:/home/user/comps/agent/src/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=vllm -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/src/tools/custom_tools.yaml opea/agent:latest # check status docker logs comps-langchain-agent-endpoint @@ -114,7 +114,7 @@ docker logs comps-langchain-agent-endpoint > debug mode > > ```bash -> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=vllm -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest +> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/src/:/home/user/comps/agent/src/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=vllm -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/src/tools/custom_tools.yaml opea/agent:latest > ``` ## 🚀 3. Validate Microservice @@ -189,7 +189,7 @@ def opea_rag_query(query): ```bash # Agent -docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/langchain/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursive_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/agent-langchain:latest +docker run -d --runtime=runc --name="comps-langchain-agent-endpoint" -v my_tools:/home/user/comps/agent/src/tools -p 9090:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e ip_address=${ip_address} -e strategy=react_llama -e llm_endpoint_url=http://${ip_address}:8080 -e llm_engine=tgi -e recursion_limit=15 -e require_human_feedback=false -e tools=/home/user/comps/agent/src/tools/custom_tools.yaml opea/agent:latest ``` - validate with my_tools @@ -205,5 +205,5 @@ data: [DONE] ## 5. Customize agent strategy -For advanced developers who want to implement their own agent strategies, you can add a separate folder in `src\strategy`, implement your agent by inherit the `BaseAgent` class, and add your strategy into the `src\agent.py`. The architecture of this agent microservice is shown in the diagram below as a reference. +For advanced developers who want to implement their own agent strategies, you can add a separate folder in `integrations/strategy`, implement your agent by inheriting the `BaseAgent` class, and add your strategy into `integrations/agent.py`. The architecture of this agent microservice is shown in the diagram below as a reference. 
![Architecture Overview](agent_arch.jpg) diff --git a/comps/agent/langchain/agent.py b/comps/agent/src/agent.py similarity index 95% rename from comps/agent/langchain/agent.py rename to comps/agent/src/agent.py index 0278005452..fc47c3132e 100644 --- a/comps/agent/langchain/agent.py +++ b/comps/agent/src/agent.py @@ -14,10 +14,10 @@ sys.path.append(comps_path) from comps import CustomLogger, GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice -from comps.agent.langchain.src.agent import instantiate_agent -from comps.agent.langchain.src.global_var import assistants_global_kv, threads_global_kv -from comps.agent.langchain.src.thread import instantiate_thread_memory, thread_completion_callback -from comps.agent.langchain.src.utils import get_args +from comps.agent.src.integrations.agent import instantiate_agent +from comps.agent.src.integrations.global_var import assistants_global_kv, threads_global_kv +from comps.agent.src.integrations.thread import instantiate_thread_memory, thread_completion_callback +from comps.agent.src.integrations.utils import get_args from comps.cores.proto.api_protocol import ( AssistantsObject, ChatCompletionRequest, diff --git a/comps/agent/langchain/agent_arch.jpg b/comps/agent/src/agent_arch.jpg similarity index 100% rename from comps/agent/langchain/agent_arch.jpg rename to comps/agent/src/agent_arch.jpg diff --git a/comps/agent/langchain/src/agent.py b/comps/agent/src/integrations/agent.py similarity index 100% rename from comps/agent/langchain/src/agent.py rename to comps/agent/src/integrations/agent.py diff --git a/comps/agent/langchain/src/config.py b/comps/agent/src/integrations/config.py similarity index 100% rename from comps/agent/langchain/src/config.py rename to comps/agent/src/integrations/config.py diff --git a/comps/agent/langchain/src/global_var.py b/comps/agent/src/integrations/global_var.py similarity index 100% rename from comps/agent/langchain/src/global_var.py rename to comps/agent/src/integrations/global_var.py diff --git a/comps/agent/langchain/src/persistence.py b/comps/agent/src/integrations/persistence.py similarity index 100% rename from comps/agent/langchain/src/persistence.py rename to comps/agent/src/integrations/persistence.py diff --git a/comps/agent/langchain/src/strategy/__init__.py b/comps/agent/src/integrations/strategy/__init__.py similarity index 100% rename from comps/agent/langchain/src/strategy/__init__.py rename to comps/agent/src/integrations/strategy/__init__.py diff --git a/comps/agent/langchain/src/strategy/base_agent.py b/comps/agent/src/integrations/strategy/base_agent.py similarity index 100% rename from comps/agent/langchain/src/strategy/base_agent.py rename to comps/agent/src/integrations/strategy/base_agent.py diff --git a/comps/agent/langchain/src/strategy/planexec/README.md b/comps/agent/src/integrations/strategy/planexec/README.md similarity index 100% rename from comps/agent/langchain/src/strategy/planexec/README.md rename to comps/agent/src/integrations/strategy/planexec/README.md diff --git a/comps/agent/langchain/src/strategy/planexec/__init__.py b/comps/agent/src/integrations/strategy/planexec/__init__.py similarity index 100% rename from comps/agent/langchain/src/strategy/planexec/__init__.py rename to comps/agent/src/integrations/strategy/planexec/__init__.py diff --git a/comps/agent/langchain/src/strategy/planexec/planner.py b/comps/agent/src/integrations/strategy/planexec/planner.py similarity index 100% rename from comps/agent/langchain/src/strategy/planexec/planner.py 
rename to comps/agent/src/integrations/strategy/planexec/planner.py diff --git a/comps/agent/langchain/src/strategy/planexec/prompt.py b/comps/agent/src/integrations/strategy/planexec/prompt.py similarity index 100% rename from comps/agent/langchain/src/strategy/planexec/prompt.py rename to comps/agent/src/integrations/strategy/planexec/prompt.py diff --git a/comps/agent/langchain/src/strategy/ragagent/README.md b/comps/agent/src/integrations/strategy/ragagent/README.md similarity index 100% rename from comps/agent/langchain/src/strategy/ragagent/README.md rename to comps/agent/src/integrations/strategy/ragagent/README.md diff --git a/comps/agent/langchain/src/strategy/ragagent/__init__.py b/comps/agent/src/integrations/strategy/ragagent/__init__.py similarity index 100% rename from comps/agent/langchain/src/strategy/ragagent/__init__.py rename to comps/agent/src/integrations/strategy/ragagent/__init__.py diff --git a/comps/agent/langchain/src/strategy/ragagent/planner.py b/comps/agent/src/integrations/strategy/ragagent/planner.py similarity index 100% rename from comps/agent/langchain/src/strategy/ragagent/planner.py rename to comps/agent/src/integrations/strategy/ragagent/planner.py diff --git a/comps/agent/langchain/src/strategy/ragagent/prompt.py b/comps/agent/src/integrations/strategy/ragagent/prompt.py similarity index 100% rename from comps/agent/langchain/src/strategy/ragagent/prompt.py rename to comps/agent/src/integrations/strategy/ragagent/prompt.py diff --git a/comps/agent/langchain/src/strategy/ragagent/utils.py b/comps/agent/src/integrations/strategy/ragagent/utils.py similarity index 100% rename from comps/agent/langchain/src/strategy/ragagent/utils.py rename to comps/agent/src/integrations/strategy/ragagent/utils.py diff --git a/comps/agent/langchain/src/strategy/react/__init__.py b/comps/agent/src/integrations/strategy/react/__init__.py similarity index 100% rename from comps/agent/langchain/src/strategy/react/__init__.py rename to comps/agent/src/integrations/strategy/react/__init__.py diff --git a/comps/agent/langchain/src/strategy/react/planner.py b/comps/agent/src/integrations/strategy/react/planner.py similarity index 100% rename from comps/agent/langchain/src/strategy/react/planner.py rename to comps/agent/src/integrations/strategy/react/planner.py diff --git a/comps/agent/langchain/src/strategy/react/prompt.py b/comps/agent/src/integrations/strategy/react/prompt.py similarity index 100% rename from comps/agent/langchain/src/strategy/react/prompt.py rename to comps/agent/src/integrations/strategy/react/prompt.py diff --git a/comps/agent/langchain/src/strategy/react/utils.py b/comps/agent/src/integrations/strategy/react/utils.py similarity index 100% rename from comps/agent/langchain/src/strategy/react/utils.py rename to comps/agent/src/integrations/strategy/react/utils.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/README.md b/comps/agent/src/integrations/strategy/sqlagent/README.md similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/README.md rename to comps/agent/src/integrations/strategy/sqlagent/README.md diff --git a/comps/agent/langchain/src/strategy/sqlagent/__init__.py b/comps/agent/src/integrations/strategy/sqlagent/__init__.py similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/__init__.py rename to comps/agent/src/integrations/strategy/sqlagent/__init__.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/hint.py b/comps/agent/src/integrations/strategy/sqlagent/hint.py similarity index 
100% rename from comps/agent/langchain/src/strategy/sqlagent/hint.py rename to comps/agent/src/integrations/strategy/sqlagent/hint.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/planner.py b/comps/agent/src/integrations/strategy/sqlagent/planner.py similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/planner.py rename to comps/agent/src/integrations/strategy/sqlagent/planner.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/prompt.py b/comps/agent/src/integrations/strategy/sqlagent/prompt.py similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/prompt.py rename to comps/agent/src/integrations/strategy/sqlagent/prompt.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/sql_tools.py b/comps/agent/src/integrations/strategy/sqlagent/sql_tools.py similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/sql_tools.py rename to comps/agent/src/integrations/strategy/sqlagent/sql_tools.py diff --git a/comps/agent/langchain/src/strategy/sqlagent/utils.py b/comps/agent/src/integrations/strategy/sqlagent/utils.py similarity index 100% rename from comps/agent/langchain/src/strategy/sqlagent/utils.py rename to comps/agent/src/integrations/strategy/sqlagent/utils.py diff --git a/comps/agent/langchain/src/thread.py b/comps/agent/src/integrations/thread.py similarity index 100% rename from comps/agent/langchain/src/thread.py rename to comps/agent/src/integrations/thread.py diff --git a/comps/agent/langchain/src/tools.py b/comps/agent/src/integrations/tools.py similarity index 100% rename from comps/agent/langchain/src/tools.py rename to comps/agent/src/integrations/tools.py diff --git a/comps/agent/langchain/src/utils.py b/comps/agent/src/integrations/utils.py similarity index 100% rename from comps/agent/langchain/src/utils.py rename to comps/agent/src/integrations/utils.py diff --git a/comps/agent/langchain/requirements.txt b/comps/agent/src/requirements.txt similarity index 100% rename from comps/agent/langchain/requirements.txt rename to comps/agent/src/requirements.txt diff --git a/comps/agent/langchain/sql_agent.png b/comps/agent/src/sql_agent.png similarity index 100% rename from comps/agent/langchain/sql_agent.png rename to comps/agent/src/sql_agent.png diff --git a/comps/agent/langchain/sql_agent_llama.png b/comps/agent/src/sql_agent_llama.png similarity index 100% rename from comps/agent/langchain/sql_agent_llama.png rename to comps/agent/src/sql_agent_llama.png diff --git a/comps/agent/langchain/test.py b/comps/agent/src/test.py similarity index 97% rename from comps/agent/langchain/test.py rename to comps/agent/src/test.py index f074c74277..1ca089ed0c 100644 --- a/comps/agent/langchain/test.py +++ b/comps/agent/src/test.py @@ -8,11 +8,11 @@ import pandas as pd import requests -from src.utils import get_args +from integrations.utils import get_args def test_agent_local(args): - from src.agent import instantiate_agent + from integrations.agent import instantiate_agent if args.q == 0: df = pd.DataFrame({"query": ["What is the Intel OPEA Project?"]}) @@ -148,7 +148,7 @@ def process_request(api, query, is_stream=False): def test_ut(args): - from src.tools import get_tools_descriptions + from integrations.tools import get_tools_descriptions tools = get_tools_descriptions("tools/custom_tools.py") for tool in tools: diff --git a/comps/agent/langchain/test_assistant_api.py b/comps/agent/src/test_assistant_api.py similarity index 98% rename from comps/agent/langchain/test_assistant_api.py rename to 
comps/agent/src/test_assistant_api.py index cf398c8bbf..b4c252d232 100644 --- a/comps/agent/langchain/test_assistant_api.py +++ b/comps/agent/src/test_assistant_api.py @@ -5,7 +5,7 @@ import json import requests -from src.utils import get_args +from integrations.utils import get_args def test_assistants_http(args): diff --git a/comps/agent/langchain/tools/custom_prompt.py b/comps/agent/src/tools/custom_prompt.py similarity index 100% rename from comps/agent/langchain/tools/custom_prompt.py rename to comps/agent/src/tools/custom_prompt.py diff --git a/comps/agent/langchain/tools/custom_tools.py b/comps/agent/src/tools/custom_tools.py similarity index 100% rename from comps/agent/langchain/tools/custom_tools.py rename to comps/agent/src/tools/custom_tools.py diff --git a/comps/agent/langchain/tools/custom_tools.yaml b/comps/agent/src/tools/custom_tools.yaml similarity index 100% rename from comps/agent/langchain/tools/custom_tools.yaml rename to comps/agent/src/tools/custom_tools.yaml diff --git a/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower.yaml b/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower.yaml index c7044ac296..40b1008568 100644 --- a/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower.yaml +++ b/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower.yaml @@ -20,7 +20,7 @@ services: timeout: 6s retries: 18 start_period: 30s - embedding-multimodal: + embedding: image: opea/embedding:latest container_name: embedding-multimodal-bridgetower-server ports: diff --git a/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower_intel_hpu.yaml b/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower_intel_hpu.yaml index 946e68f730..a0d88173e6 100644 --- a/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower_intel_hpu.yaml +++ b/comps/embeddings/deployment/docker_compose/compose_multimodal_bridgetower_intel_hpu.yaml @@ -24,7 +24,7 @@ services: timeout: 6s retries: 18 start_period: 30s - embedding-multimodal: + embedding: image: opea/embedding:latest container_name: embedding-multimodal-bridgetower-server ports: diff --git a/comps/embeddings/deployment/docker_compose/compose_predictionguard.yaml b/comps/embeddings/deployment/docker_compose/compose_predictionguard.yaml index 6ea2fa4d55..b55c6f88f7 100644 --- a/comps/embeddings/deployment/docker_compose/compose_predictionguard.yaml +++ b/comps/embeddings/deployment/docker_compose/compose_predictionguard.yaml @@ -4,7 +4,7 @@ services: embedding: image: opea/embedding:latest - container_name: embedding-predictionguard + container_name: embedding ports: - "6000:6000" ipc: host diff --git a/comps/embeddings/deployment/docker_compose/compose_tei.yaml b/comps/embeddings/deployment/docker_compose/compose_tei.yaml index da19212b32..4841b7ffe0 100644 --- a/comps/embeddings/deployment/docker_compose/compose_tei.yaml +++ b/comps/embeddings/deployment/docker_compose/compose_tei.yaml @@ -24,7 +24,7 @@ services: retries: 18 embedding: image: opea/embedding:latest - container_name: embedding-tei-server + container_name: embedding-server ports: - "6000:6000" ipc: host diff --git a/comps/llms/deployment/docker_compose/text-generation_vllm_langchain.yaml b/comps/llms/deployment/docker_compose/text-generation_vllm_langchain.yaml index 5e8b7ed9ba..5871367ceb 100644 --- a/comps/llms/deployment/docker_compose/text-generation_vllm_langchain.yaml +++ 
b/comps/llms/deployment/docker_compose/text-generation_vllm_langchain.yaml @@ -25,7 +25,7 @@ services: ipc: host command: --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 llm: - image: opea/llm-vllm:latest + image: opea/llm-textgen:latest container_name: llm-vllm-gaudi-server depends_on: - vllm-service diff --git a/comps/llms/src/text-generation/README.md b/comps/llms/src/text-generation/README.md index 3deba1dda9..b59c77f87d 100644 --- a/comps/llms/src/text-generation/README.md +++ b/comps/llms/src/text-generation/README.md @@ -41,7 +41,7 @@ export LLM_MODEL_ID=${your_hf_llm_model} ```bash cd ../../../../ -docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile . +docker build -t opea/llm-textgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile . ``` To start a docker container, you have two options: @@ -54,7 +54,7 @@ You can choose one as needed. ### 2.3 Run Docker with CLI (Option A) ```bash -docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HF_TOKEN=$HF_TOKEN opea/llm-tgi:latest +docker run -d --name="llm-tgi-server" -p 9000:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HF_TOKEN=$HF_TOKEN opea/llm-textgen:latest ``` ### 2.4 Run Docker with Docker Compose (Option B) diff --git a/comps/llms/text-generation/README.md b/comps/llms/text-generation/README.md index c1a608bbe8..2c12e1cfc4 100644 --- a/comps/llms/text-generation/README.md +++ b/comps/llms/text-generation/README.md @@ -194,7 +194,7 @@ docker run -d \ -e https_proxy=$https_proxy \ -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT \ -e HF_TOKEN=$HF_TOKEN \ - opea/llm-tgi:latest + opea/llm-textgen:latest ``` #### 2.3.2 vLLM @@ -218,7 +218,7 @@ docker run \ -e vLLM_LLM_ENDPOINT=$vLLM_LLM_ENDPOINT \ -e HF_TOKEN=$HF_TOKEN \ -e LLM_MODEL=$LLM_MODEL \ - opea/llm-vllm:latest + opea/llm-textgen:latest ``` ### 2.4 Run Docker with Docker Compose (Option B) diff --git a/comps/llms/text-generation/vllm/langchain/docker_compose_llm.yaml b/comps/llms/text-generation/vllm/langchain/docker_compose_llm.yaml index 077ceee8b1..7737048ba4 100644 --- a/comps/llms/text-generation/vllm/langchain/docker_compose_llm.yaml +++ b/comps/llms/text-generation/vllm/langchain/docker_compose_llm.yaml @@ -25,7 +25,7 @@ services: ipc: host command: --model $LLM_MODEL --tensor-parallel-size 1 --host 0.0.0.0 --port 80 llm: - image: opea/llm-vllm:latest + image: opea/llm-textgen:latest container_name: llm-vllm-gaudi-server depends_on: - vllm-service diff --git a/comps/llms/text-generation/vllm/langchain/launch_microservice.sh b/comps/llms/text-generation/vllm/langchain/launch_microservice.sh index 01bd0f6f58..70822ed841 100644 --- a/comps/llms/text-generation/vllm/langchain/launch_microservice.sh +++ b/comps/llms/text-generation/vllm/langchain/launch_microservice.sh @@ -11,4 +11,4 @@ docker run -d --rm \ -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN \ -e LLM_MODEL=$LLM_MODEL \ -e LOGFLAG=$LOGFLAG \ - opea/llm-vllm:latest + opea/llm-textgen:latest diff --git a/comps/reranks/deployment/docker_compose/rerank_tei.yaml b/comps/reranks/deployment/docker_compose/rerank_tei.yaml index 14c62ff3fb..73ab14fcb9 100644 --- a/comps/reranks/deployment/docker_compose/rerank_tei.yaml +++ 
b/comps/reranks/deployment/docker_compose/rerank_tei.yaml @@ -20,7 +20,7 @@ services: command: --model-id ${RERANK_MODEL_ID} --hf-api-token ${HF_TOKEN} reranking: image: opea/reranking:latest - container_name: reranking-tei-server + container_name: reranking-server ports: - "8000:8000" ipc: host diff --git a/tests/agent/sql_agent_test/test_sql_agent.sh b/tests/agent/sql_agent_test/test_sql_agent.sh index 502abaa318..edbdb01e48 100644 --- a/tests/agent/sql_agent_test/test_sql_agent.sh +++ b/tests/agent/sql_agent_test/test_sql_agent.sh @@ -15,7 +15,7 @@ LOG_PATH="$WORKPATH/tests" export WORKDIR=$(dirname "$WORKPATH") echo $WORKDIR -export agent_image="opea/agent-langchain:comps" +export agent_image="opea/agent:comps" export agent_container_name="test-comps-agent-endpoint" export ip_address=$(hostname -I | awk '{print $1}') @@ -29,7 +29,7 @@ export LLM_MODEL_ID="meta-llama/Meta-Llama-3.1-70B-Instruct" export LLM_ENDPOINT_URL="http://${ip_address}:${vllm_port}" export temperature=0.01 export max_new_tokens=4096 -export TOOLSET_PATH=$WORKPATH/comps/agent/langchain/tools/ # $WORKPATH/tests/agent/sql_agent_test/ +export TOOLSET_PATH=$WORKPATH/comps/agent/src/tools/ # $WORKPATH/tests/agent/sql_agent_test/ echo "TOOLSET_PATH=${TOOLSET_PATH}" export recursion_limit=15 export db_name=california_schools @@ -75,12 +75,12 @@ function build_docker_images() { echo "Building the docker images" cd $WORKPATH echo $WORKPATH - docker build --no-cache -t $agent_image --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/agent/langchain/Dockerfile . + docker build --no-cache -t $agent_image --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/agent/src/Dockerfile . if [ $? -ne 0 ]; then - echo "opea/agent-langchain built fail" + echo "opea/agent built fail" exit 1 else - echo "opea/agent-langchain built successful" + echo "opea/agent built successful" fi } diff --git a/tests/agent/test_agent_langchain_on_intel_hpu.sh b/tests/agent/test_agent_langchain_on_intel_hpu.sh index 6d5f2f1a4c..16b1fc8373 100644 --- a/tests/agent/test_agent_langchain_on_intel_hpu.sh +++ b/tests/agent/test_agent_langchain_on_intel_hpu.sh @@ -20,7 +20,7 @@ ls $vllm_volume export WORKPATH=$WORKPATH -export agent_image="opea/agent-langchain:comps" +export agent_image="opea/agent:comps" export agent_container_name="test-comps-agent-endpoint" export model=meta-llama/Meta-Llama-3.1-70B-Instruct @@ -31,7 +31,7 @@ export LLM_MODEL_ID="meta-llama/Meta-Llama-3.1-70B-Instruct" export LLM_ENDPOINT_URL="http://${ip_address}:${vllm_port}" export temperature=0.01 export max_new_tokens=4096 -export TOOLSET_PATH=$WORKPATH/comps/agent/langchain/tools/ +export TOOLSET_PATH=$WORKPATH/comps/agent/src/tools/ echo "TOOLSET_PATH=${TOOLSET_PATH}" export recursion_limit=15 @@ -39,12 +39,12 @@ function build_docker_images() { echo "Building the docker images" cd $WORKPATH echo $WORKPATH - docker build --no-cache -t $agent_image --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/agent/langchain/Dockerfile . + docker build --no-cache -t $agent_image --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -f comps/agent/src/Dockerfile . if [ $? 
-ne 0 ]; then - echo "opea/agent-langchain built fail" + echo "opea/agent built fail" exit 1 else - echo "opea/agent-langchain built successful" + echo "opea/agent built successful" fi } @@ -164,8 +164,8 @@ function start_vllm_service_70B() { echo "Service started successfully" } -function start_react_langchain_agent_service() { - echo "Starting react_langchain agent microservice" +function start_react_agent_service() { + echo "Starting react agent microservice" docker compose -f $WORKPATH/tests/agent/react_langchain.yaml up -d sleep 120s docker logs test-comps-agent-endpoint @@ -174,7 +174,7 @@ function start_react_langchain_agent_service() { function start_react_langgraph_agent_service_openai() { - echo "Starting react_langchain agent microservice" + echo "Starting react agent microservice" docker compose -f $WORKPATH/tests/agent/react_langgraph_openai.yaml up -d sleep 120s docker logs test-comps-agent-endpoint @@ -253,7 +253,7 @@ function validate_microservice() { # "query": "What is OPEA?" # }') CONTENT=$(python3 $WORKPATH/tests/agent/test.py) - local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent-langchain") + local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent") echo "$EXIT_CODE" local EXIT_CODE="${EXIT_CODE:0-1}" echo "return value is $EXIT_CODE" @@ -270,7 +270,7 @@ function validate_microservice() { function validate_microservice_streaming() { echo "Testing agent service - chat completion API" CONTENT=$(python3 $WORKPATH/tests/agent/test.py --stream) - local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent-langchain") + local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent") echo "$EXIT_CODE" local EXIT_CODE="${EXIT_CODE:0-1}" echo "return value is $EXIT_CODE" @@ -286,8 +286,8 @@ function validate_microservice_streaming() { function validate_assistant_api() { cd $WORKPATH echo "Testing agent service - assistant api" - local CONTENT=$(python3 comps/agent/langchain/test_assistant_api.py --ip_addr ${ip_address} --ext_port 9095 --assistants_api_test --query 'What is Intel OPEA project?' 2>&1 | tee ${LOG_PATH}/test-agent-langchain-assistantsapi.log) - local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent-langchain-assistantsapi") + local CONTENT=$(python3 comps/agent/src/test_assistant_api.py --ip_addr ${ip_address} --ext_port 9095 --assistants_api_test --query 'What is Intel OPEA project?' 
2>&1 | tee ${LOG_PATH}/test-agent-assistantsapi.log) + local EXIT_CODE=$(validate "$CONTENT" "OPEA" "test-agent-assistantsapi") echo "$EXIT_CODE" local EXIT_CODE="${EXIT_CODE:0-1}" echo "return value is $EXIT_CODE" @@ -295,7 +295,7 @@ function validate_assistant_api() { echo "==================TGI logs ======================" docker logs comps-tgi-gaudi-service echo "==================Agent logs ======================" - docker logs comps-langchain-agent-endpoint + docker logs comps-agent-endpoint exit 1 fi } @@ -357,7 +357,7 @@ function main() { build_vllm_docker_images # ==================== Tests with 70B model ==================== - # RAG agent, react_llama, react_langchain, assistant apis + # RAG agent, react_llama, react, assistant apis start_vllm_service_70B @@ -376,8 +376,8 @@ function main() { echo "=============================================" - # # # test react_langchain - start_react_langchain_agent_service + # # # test react + start_react_agent_service echo "=============Testing ReAct Langchain=============" validate_microservice_streaming validate_assistant_api diff --git a/tests/embeddings/test_embeddings_tei.sh b/tests/embeddings/test_embeddings_tei.sh index b3d2a30198..ceecaf0c37 100644 --- a/tests/embeddings/test_embeddings_tei.sh +++ b/tests/embeddings/test_embeddings_tei.sh @@ -23,11 +23,11 @@ function start_service() { tei_endpoint=5001 model="BAAI/bge-base-en-v1.5" unset http_proxy - docker run -d --name="test-comps-embedding-tei-endpoint" -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model + docker run -d --name="test-comps-embedding-endpoint" -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model sleep 3m export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:${tei_endpoint}" tei_service_port=5002 - docker run -d --name="test-comps-embedding-tei-server" -e LOGFLAG=True -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p ${tei_service_port}:6000 --ipc=host -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e EMBEDDING_COMPONENT_NAME="OPEA_TEI_EMBEDDING" opea/embedding:comps + docker run -d --name="test-comps-embedding-server" -e LOGFLAG=True -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p ${tei_service_port}:6000 --ipc=host -e TEI_EMBEDDING_ENDPOINT=$TEI_EMBEDDING_ENDPOINT -e EMBEDDING_COMPONENT_NAME="OPEA_TEI_EMBEDDING" opea/embedding:comps sleep 15 } @@ -42,8 +42,8 @@ function validate_service() { echo "Result correct." else echo "Result wrong. Received was $result" - docker logs test-comps-embedding-tei-endpoint - docker logs test-comps-embedding-tei-server + docker logs test-comps-embedding-endpoint + docker logs test-comps-embedding-server exit 1 fi } @@ -62,8 +62,8 @@ function validate_microservice_with_openai() { tei_service_port=5002 python3 ${WORKPATH}/tests/utils/validate_svc_with_openai.py $ip_address $tei_service_port "embedding" if [ $? 
-ne 0 ]; then - docker logs test-comps-embedding-tei-endpoint - docker logs test-comps-embedding-tei-server + docker logs test-comps-embedding-endpoint + docker logs test-comps-embedding-server exit 1 fi } diff --git a/tests/llms/test_llms_summarization_tgi_langchain.sh b/tests/llms/test_llms_summarization_tgi_langchain.sh index d805b7361b..ee12777657 100644 --- a/tests/llms/test_llms_summarization_tgi_langchain.sh +++ b/tests/llms/test_llms_summarization_tgi_langchain.sh @@ -12,10 +12,10 @@ function build_docker_images() { cd $WORKPATH docker build --no-cache -t opea/llm-sum-tgi:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/summarization/tgi/langchain/Dockerfile . if [ $? -ne 0 ]; then - echo "opea/llm-tgi built fail" + echo "opea/llm-sum-tgi built fail" exit 1 else - echo "opea/llm-tgi built successful" + echo "opea/llm-sum-tgi built successful" fi } diff --git a/tests/reranks/test_reranks_fastrag.sh b/tests/reranks/test_reranks_fastrag.sh index 17bc015039..325dcf72c1 100644 --- a/tests/reranks/test_reranks_fastrag.sh +++ b/tests/reranks/test_reranks_fastrag.sh @@ -8,12 +8,12 @@ WORKPATH=$(dirname "$PWD") ip_address=$(hostname -I | awk '{print $1}') function build_docker_images() { cd $WORKPATH - docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/reranking-fastrag:comps -f comps/reranks/fastrag/Dockerfile . + docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -t opea/reranking:comps -f comps/reranks/fastrag/Dockerfile . if [ $? -ne 0 ]; then - echo "opea/reranking-fastrag built fail" + echo "opea/reranking built fail" exit 1 else - echo "opea/reranking-fastrag built successful" + echo "opea/reranking built successful" fi } @@ -21,7 +21,7 @@ function start_service() { export EMBED_MODEL="Intel/bge-small-en-v1.5-rag-int8-static" fastrag_service_port=5020 unset http_proxy - docker run -d --name="test-comps-reranking-fastrag-server" -p ${fastrag_service_port}:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e EMBED_MODEL=$EMBED_MODEL opea/reranking-fastrag:comps + docker run -d --name="test-comps-reranking-server" -p ${fastrag_service_port}:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e EMBED_MODEL=$EMBED_MODEL opea/reranking:comps sleep 3m } @@ -35,7 +35,7 @@ function validate_microservice() { echo "Result correct." else echo "Result wrong. 
Received was $result" - docker logs test-comps-reranking-fastrag-server + docker logs test-comps-reranking-server exit 1 fi } diff --git a/tests/reranks/test_reranks_opea_tei.sh b/tests/reranks/test_reranks_opea_tei.sh index 65acf6830d..3d510ffb28 100644 --- a/tests/reranks/test_reranks_opea_tei.sh +++ b/tests/reranks/test_reranks_opea_tei.sh @@ -25,12 +25,12 @@ function start_service() { model=BAAI/bge-reranker-base revision=refs/pr/4 volume=$PWD/data - docker run -d --name="test-comps-reranking-tei-endpoint" -p $tei_endpoint:80 -v $volume:/data -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model + docker run -d --name="test-comps-reranking-endpoint" -p $tei_endpoint:80 -v $volume:/data -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model sleep 3m export TEI_RERANKING_ENDPOINT="http://${ip_address}:${tei_endpoint}" tei_service_port=5007 unset http_proxy - docker run -d --name="test-comps-reranking-tei-server" -e LOGFLAG=True -p ${tei_service_port}:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HF_TOKEN=$HF_TOKEN -e RERANK_TYPE="tei" opea/reranking:comps + docker run -d --name="test-comps-reranking-server" -e LOGFLAG=True -p ${tei_service_port}:8000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TEI_RERANKING_ENDPOINT=$TEI_RERANKING_ENDPOINT -e HF_TOKEN=$HF_TOKEN -e RERANK_TYPE="tei" opea/reranking:comps sleep 15 } @@ -45,8 +45,8 @@ function validate_microservice() { echo "Content is as expected." else echo "Content does not match the expected result: $CONTENT" - docker logs test-comps-reranking-tei-server - docker logs test-comps-reranking-tei-endpoint + docker logs test-comps-reranking-server + docker logs test-comps-reranking-endpoint exit 1 fi } diff --git a/tests/reranks/test_reranks_videoqna.sh b/tests/reranks/test_reranks_videoqna.sh index e63e13fe0c..37fca3e60e 100755 --- a/tests/reranks/test_reranks_videoqna.sh +++ b/tests/reranks/test_reranks_videoqna.sh @@ -9,11 +9,11 @@ ip_address=$(hostname -I | awk '{print $1}') function build_docker_images() { cd $WORKPATH - docker build --no-cache -t opea/reranking-videoqna:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/videoqna/Dockerfile . + docker build --no-cache -t opea/reranking:comps --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/videoqna/Dockerfile . } function start_service() { - docker run -d --name "test-comps-reranking-videoqna-server" \ + docker run -d --name "test-comps-reranking-server" \ -p 5037:8000 \ --ipc=host \ -e no_proxy=${no_proxy} \ @@ -21,10 +21,10 @@ function start_service() { -e https_proxy=${https_proxy} \ -e CHUNK_DURATION=${CHUNK_DURATION} \ -e FILE_SERVER_ENDPOINT=${FILE_SERVER_ENDPOINT} \ - opea/reranking-videoqna:comps + opea/reranking:comps - until docker logs test-comps-reranking-videoqna-server 2>&1 | grep -q "Uvicorn running on"; do + until docker logs test-comps-reranking-server 2>&1 | grep -q "Uvicorn running on"; do sleep 2 done }
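Reviewer note on the `check_duplicated_image.py` hunk above: the patched script now accumulates every image/Dockerfile mismatch into an `errors` list and exits once at the end, instead of calling `sys.exit(1)` on the first conflict, so a single CI run surfaces all collisions introduced by the image renames at once. Below is a minimal, self-contained sketch of that accumulate-then-fail flow; it mirrors the patched data structures and messages, but the YAML walking is simplified (argparse and context-relative Dockerfile resolution via `os.path.normpath` are omitted), so treat it as illustrative rather than the exact script.

```python
# Illustrative sketch (not the repo script): accumulate all conflicts
# across compose files, then fail once, as in the patched
# .github/workflows/scripts/check_duplicated_image.py.
import sys

import yaml

images = {}       # image name -> first build definition seen
dockerfiles = {}  # dockerfile path -> first build definition seen
errors = []       # every conflict found, reported together at the end


def check_compose(file_path):
    with open(file_path) as f:
        compose = yaml.safe_load(f) or {}
    for service, cfg in (compose.get("services") or {}).items():
        build, image = cfg.get("build"), cfg.get("image")
        if not build or not image:
            continue
        dockerfile = build.get("dockerfile", "")
        item = {"file_path": file_path, "service": service, "dockerfile": dockerfile, "image": image}
        # One image name built from two different Dockerfiles -> hard error.
        if image in images and dockerfile != images[image]["dockerfile"]:
            errors.append(f"ERROR: image {image} built from both {dockerfile} and {images[image]['dockerfile']}")
        else:
            images[image] = item
        # One Dockerfile producing two different image names -> recorded as a warning.
        if dockerfile in dockerfiles and image != dockerfiles[dockerfile]["image"]:
            errors.append(f"WARNING: {dockerfile} builds both {image} and {dockerfiles[dockerfile]['image']}")
        else:
            dockerfiles[dockerfile] = item


if __name__ == "__main__":
    for path in sys.argv[1:]:  # compose files to check, passed as CLI args
        check_compose(path)
    if errors:  # print every conflict collected above, then fail exactly once
        print("\n".join(errors))
        sys.exit(1)
    print("SUCCESS: No Conflicts Found.")
```

One behavioral detail worth noting in the actual hunk: the "WARNING: Different images using the same Dockerfile" case is appended to the same `errors` list that triggers `sys.exit(1)`, so those warnings fail the check just like the image/Dockerfile conflicts do.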