diff --git a/python/morpheus_llm/morpheus_llm/error.py b/python/morpheus_llm/morpheus_llm/error.py
new file mode 100644
index 0000000000..2505d987dd
--- /dev/null
+++ b/python/morpheus_llm/morpheus_llm/error.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2024, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+IMPORT_ERROR_MESSAGE = (
+    "{package} not found. Install it and additional dependencies by running the following command:\n"
+    "`conda env update --solver=libmamba -n morpheus "
+    "--file conda/environments/examples_cuda-121_arch-x86_64.yaml`")
diff --git a/python/morpheus_llm/morpheus_llm/llm/nodes/langchain_agent_node.py b/python/morpheus_llm/morpheus_llm/llm/nodes/langchain_agent_node.py
index e63b1d351c..0e96c600fd 100644
--- a/python/morpheus_llm/morpheus_llm/llm/nodes/langchain_agent_node.py
+++ b/python/morpheus_llm/morpheus_llm/llm/nodes/langchain_agent_node.py
@@ -16,13 +16,19 @@
 import logging
 import typing
 
-from langchain_core.exceptions import OutputParserException
-
+from morpheus_llm.error import IMPORT_ERROR_MESSAGE
 from morpheus_llm.llm import LLMContext
 from morpheus_llm.llm import LLMNodeBase
 
 logger = logging.getLogger(__name__)
 
+IMPORT_EXCEPTION = None
+
+try:
+    from langchain_core.exceptions import OutputParserException
+except ImportError as import_exc:
+    IMPORT_EXCEPTION = import_exc
+
 if typing.TYPE_CHECKING:
     from langchain.agents import AgentExecutor
 
@@ -47,6 +53,9 @@ def __init__(self,
                  agent_executor: "AgentExecutor",
                  replace_exceptions: bool = False,
                  replace_exceptions_value: typing.Optional[str] = None):
+        if IMPORT_EXCEPTION is not None:
+            raise ImportError(IMPORT_ERROR_MESSAGE.format(package='langchain_core')) from IMPORT_EXCEPTION
+
         super().__init__()
 
         self._agent_executor = agent_executor
diff --git a/python/morpheus_llm/morpheus_llm/llm/services/nemo_llm_service.py b/python/morpheus_llm/morpheus_llm/llm/services/nemo_llm_service.py
index ef80814929..30cda8e02c 100644
--- a/python/morpheus_llm/morpheus_llm/llm/services/nemo_llm_service.py
+++ b/python/morpheus_llm/morpheus_llm/llm/services/nemo_llm_service.py
@@ -18,16 +18,13 @@
 import warnings
 
 from morpheus.utils.env_config_value import EnvConfigValue
+from morpheus_llm.error import IMPORT_ERROR_MESSAGE
 from morpheus_llm.llm.services.llm_service import LLMClient
 from morpheus_llm.llm.services.llm_service import LLMService
 
 logger = logging.getLogger(__name__)
 
 IMPORT_EXCEPTION = None
-IMPORT_ERROR_MESSAGE = (
-    "NemoLLM not found. Install it and other additional dependencies by running the following command:\n"
-    "`conda env update --solver=libmamba -n morpheus "
-    "--file conda/environments/examples_cuda-121_arch-x86_64.yaml --prune`")
 
 try:
     import nemollm
@@ -53,7 +50,7 @@ class NeMoLLMClient(LLMClient):
 
     def __init__(self, parent: "NeMoLLMService", *, model_name: str, **model_kwargs) -> None:
         if IMPORT_EXCEPTION is not None:
-            raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION
+            raise ImportError(IMPORT_ERROR_MESSAGE.format(package='nemollm')) from IMPORT_EXCEPTION
 
         super().__init__()
 
@@ -231,7 +228,7 @@ def __init__(self,
         """
 
         if IMPORT_EXCEPTION is not None:
-            raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION
+            raise ImportError(IMPORT_ERROR_MESSAGE.format(package='nemollm')) from IMPORT_EXCEPTION
 
         super().__init__()
 
diff --git a/python/morpheus_llm/morpheus_llm/llm/services/nvfoundation_llm_service.py b/python/morpheus_llm/morpheus_llm/llm/services/nvfoundation_llm_service.py
index d1e706b7c2..709f394712 100644
--- a/python/morpheus_llm/morpheus_llm/llm/services/nvfoundation_llm_service.py
+++ b/python/morpheus_llm/morpheus_llm/llm/services/nvfoundation_llm_service.py
@@ -16,17 +16,13 @@
 import typing
 
 from morpheus.utils.env_config_value import EnvConfigValue
+from morpheus_llm.error import IMPORT_ERROR_MESSAGE
 from morpheus_llm.llm.services.llm_service import LLMClient
 from morpheus_llm.llm.services.llm_service import LLMService
 
 logger = logging.getLogger(__name__)
 
 IMPORT_EXCEPTION = None
-IMPORT_ERROR_MESSAGE = (
-    "The `langchain-nvidia-ai-endpoints` package was not found. Install it and other additional dependencies by "
-    "running the following command:"
-    "`conda env update --solver=libmamba -n morpheus "
-    "--file conda/environments/examples_cuda-121_arch-x86_64.yaml`")
 
 try:
     from langchain_core.prompt_values import StringPromptValue
@@ -52,7 +48,8 @@ class NVFoundationLLMClient(LLMClient):
 
     def __init__(self, parent: "NVFoundationLLMService", *, model_name: str, **model_kwargs) -> None:
         if IMPORT_EXCEPTION is not None:
-            raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION
+            raise ImportError(
+                IMPORT_ERROR_MESSAGE.format(package='langchain-nvidia-ai-endpoints')) from IMPORT_EXCEPTION
 
         super().__init__()
 
@@ -218,7 +215,8 @@ class BaseURL(EnvConfigValue):
 
     def __init__(self, *, api_key: APIKey | str = None, base_url: BaseURL | str = None, **model_kwargs) -> None:
         if IMPORT_EXCEPTION is not None:
-            raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION
+            raise ImportError(
+                IMPORT_ERROR_MESSAGE.format(package='langchain-nvidia-ai-endpoints')) from IMPORT_EXCEPTION
 
         super().__init__()
 
diff --git a/python/morpheus_llm/morpheus_llm/llm/services/openai_chat_service.py b/python/morpheus_llm/morpheus_llm/llm/services/openai_chat_service.py
index d4eaac4503..2df6048d5a 100644
--- a/python/morpheus_llm/morpheus_llm/llm/services/openai_chat_service.py
+++ b/python/morpheus_llm/morpheus_llm/llm/services/openai_chat_service.py
@@ -23,16 +23,13 @@
 import appdirs
 
 from morpheus.utils.env_config_value import EnvConfigValue
+from morpheus_llm.error import IMPORT_ERROR_MESSAGE
 from morpheus_llm.llm.services.llm_service import LLMClient
 from morpheus_llm.llm.services.llm_service import LLMService
 
 logger = logging.getLogger(__name__)
 
 IMPORT_EXCEPTION = None
-IMPORT_ERROR_MESSAGE = ("OpenAIChatService & OpenAIChatClient require the openai package to be installed. "
" - "Install it by running the following command:\n" - "`conda env update --solver=libmamba -n morpheus " - "--file conda/environments/examples_cuda-121_arch-x86_64.yaml --prune`") try: import openai @@ -107,7 +104,7 @@ def __init__(self, json=False, **model_kwargs) -> None: if IMPORT_EXCEPTION is not None: - raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION + raise ImportError(IMPORT_ERROR_MESSAGE.format(package='openai')) from IMPORT_EXCEPTION super().__init__() @@ -400,7 +397,7 @@ def __init__(self, default_model_kwargs: dict = None) -> None: if IMPORT_EXCEPTION is not None: - raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION + raise ImportError(IMPORT_ERROR_MESSAGE.format(package='openai')) from IMPORT_EXCEPTION super().__init__() diff --git a/python/morpheus_llm/morpheus_llm/service/vdb/faiss_vdb_service.py b/python/morpheus_llm/morpheus_llm/service/vdb/faiss_vdb_service.py index 82e6c146d2..0197f3071d 100644 --- a/python/morpheus_llm/morpheus_llm/service/vdb/faiss_vdb_service.py +++ b/python/morpheus_llm/morpheus_llm/service/vdb/faiss_vdb_service.py @@ -21,13 +21,13 @@ import cudf +from morpheus_llm.error import IMPORT_ERROR_MESSAGE from morpheus_llm.service.vdb.vector_db_service import VectorDBResourceService from morpheus_llm.service.vdb.vector_db_service import VectorDBService logger = logging.getLogger(__name__) IMPORT_EXCEPTION = None -IMPORT_ERROR_MESSAGE = "FaissDBResourceService requires the FAISS library to be installed." try: from langchain.embeddings.base import Embeddings @@ -50,7 +50,7 @@ class FaissVectorDBResourceService(VectorDBResourceService): def __init__(self, parent: "FaissVectorDBService", *, name: str) -> None: if IMPORT_EXCEPTION is not None: - raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION + raise ImportError(IMPORT_ERROR_MESSAGE.format(package='langchain and faiss-gpu')) from IMPORT_EXCEPTION super().__init__() @@ -285,7 +285,7 @@ class FaissVectorDBService(VectorDBService): def __init__(self, local_dir: str, embeddings: "Embeddings"): if IMPORT_EXCEPTION is not None: - raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION + raise ImportError(IMPORT_ERROR_MESSAGE.format(package='langchain and faiss-gpu')) from IMPORT_EXCEPTION self._local_dir = local_dir self._embeddings = embeddings diff --git a/python/morpheus_llm/morpheus_llm/service/vdb/milvus_vector_db_service.py b/python/morpheus_llm/morpheus_llm/service/vdb/milvus_vector_db_service.py index f43fbbf79c..71df614b23 100644 --- a/python/morpheus_llm/morpheus_llm/service/vdb/milvus_vector_db_service.py +++ b/python/morpheus_llm/morpheus_llm/service/vdb/milvus_vector_db_service.py @@ -25,13 +25,13 @@ from morpheus.io.utils import cudf_string_cols_exceed_max_bytes from morpheus.io.utils import truncate_string_cols_by_bytes from morpheus.utils.type_aliases import DataFrameType +from morpheus_llm.error import IMPORT_ERROR_MESSAGE from morpheus_llm.service.vdb.vector_db_service import VectorDBResourceService from morpheus_llm.service.vdb.vector_db_service import VectorDBService logger = logging.getLogger(__name__) IMPORT_EXCEPTION = None -IMPORT_ERROR_MESSAGE = "MilvusVectorDBResourceService requires the milvus and pymilvus packages to be installed." # Milvus has a max string length in bytes of 65,535. 
Multi-byte characters like "ñ" will have a string length of 1, the # byte length encoded as UTF-8 will be 2 @@ -234,7 +234,7 @@ class MilvusVectorDBResourceService(VectorDBResourceService): def __init__(self, name: str, client: "MilvusClient", truncate_long_strings: bool = False) -> None: if IMPORT_EXCEPTION is not None: - raise ImportError(IMPORT_ERROR_MESSAGE) from IMPORT_EXCEPTION + raise ImportError(IMPORT_ERROR_MESSAGE.format(package='pymilvus')) from IMPORT_EXCEPTION super().__init__() diff --git a/tests/benchmarks/conftest.py b/tests/benchmarks/conftest.py index 1e21affaa8..607febf434 100644 --- a/tests/benchmarks/conftest.py +++ b/tests/benchmarks/conftest.py @@ -20,8 +20,13 @@ from unittest import mock import pytest -from pynvml.smi import NVSMI_QUERY_GPU -from pynvml.smi import nvidia_smi + +try: + from pynvml.smi import NVSMI_QUERY_GPU + from pynvml.smi import nvidia_smi +except ImportError: + print("pynvml is not installed") + from test_bench_e2e_pipelines import E2E_TEST_CONFIGS diff --git a/tests/benchmarks/test_bench_agents_simple_pipeline.py b/tests/benchmarks/test_bench_agents_simple_pipeline.py index 85202fdc02..cbd83e3cae 100644 --- a/tests/benchmarks/test_bench_agents_simple_pipeline.py +++ b/tests/benchmarks/test_bench_agents_simple_pipeline.py @@ -19,13 +19,17 @@ import typing from unittest import mock -import langchain import pytest -from langchain.agents import AgentType -from langchain.agents import initialize_agent -from langchain.agents import load_tools -from langchain.agents.tools import Tool -from langchain.utilities import serpapi + +try: + import langchain + from langchain.agents import AgentType + from langchain.agents import initialize_agent + from langchain.agents import load_tools + from langchain.agents.tools import Tool + from langchain.utilities import serpapi +except ImportError: + print("langchain is not installed") import cudf diff --git a/tests/conftest.py b/tests/conftest.py index 952142f249..84f894f707 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,6 +47,10 @@ from _utils.kafka import kafka_server # noqa: F401 pylint:disable=unused-import from _utils.kafka import zookeeper_proc # noqa: F401 pylint:disable=unused-import +OPT_DEP_SKIP_REASON = ( + "This test requires the {package} package to be installed, to install this run:\n" + "`conda env update --solver=libmamba -n morpheus --file conda/environments/examples_cuda-121_arch-x86_64.yaml`") + def pytest_addoption(parser: pytest.Parser): """ @@ -1064,33 +1068,53 @@ def nemollm_fixture(fail_missing: bool): """ Fixture to ensure nemollm is installed """ - skip_reason = ("Tests for the NeMoLLMService require the nemollm package to be installed, to install this run:\n" - "`conda env update --solver=libmamba -n morpheus " - "--file conda/environments/all_cuda-121_arch-x86_64.yaml --prune`") - yield import_or_skip("nemollm", reason=skip_reason, fail_missing=fail_missing) + yield import_or_skip("nemollm", reason=OPT_DEP_SKIP_REASON.format(package="nemollm"), fail_missing=fail_missing) -@pytest.fixture(name="nvfoundationllm", scope='session') -def nvfoundationllm_fixture(fail_missing: bool): +@pytest.fixture(name="openai", scope='session') +def openai_fixture(fail_missing: bool): """ - Fixture to ensure nvfoundationllm is installed + Fixture to ensure openai is installed """ - skip_reason = ( - "Tests for NVFoundation require the langchain-nvidia-ai-endpoints package to be installed, to install this " - "run:\n `conda env update --solver=libmamba -n morpheus " - "--file 
-    yield import_or_skip("langchain_nvidia_ai_endpoints", reason=skip_reason, fail_missing=fail_missing)
+    yield import_or_skip("openai", reason=OPT_DEP_SKIP_REASON.format(package="openai"), fail_missing=fail_missing)
 
 
-@pytest.fixture(name="openai", scope='session')
-def openai_fixture(fail_missing: bool):
+@pytest.fixture(name="langchain", scope='session')
+def langchain_fixture(fail_missing: bool):
     """
-    Fixture to ensure openai is installed
+    Fixture to ensure langchain is installed
+    """
+    yield import_or_skip("langchain", reason=OPT_DEP_SKIP_REASON.format(package="langchain"), fail_missing=fail_missing)
+
+
+@pytest.fixture(name="langchain_core", scope='session')
+def langchain_core_fixture(fail_missing: bool):
+    """
+    Fixture to ensure langchain_core is installed
+    """
+    yield import_or_skip("langchain_core",
+                         reason=OPT_DEP_SKIP_REASON.format(package="langchain_core"),
+                         fail_missing=fail_missing)
+
+
+@pytest.fixture(name="langchain_community", scope='session')
+def langchain_community_fixture(fail_missing: bool):
+    """
+    Fixture to ensure langchain_community is installed
+    """
+    yield import_or_skip("langchain_community",
+                         reason=OPT_DEP_SKIP_REASON.format(package="langchain_community"),
+                         fail_missing=fail_missing)
+
+
+@pytest.fixture(name="langchain_nvidia_ai_endpoints", scope='session')
+def langchain_nvidia_ai_endpoints_fixture(fail_missing: bool):
+    """
+    Fixture to ensure langchain_nvidia_ai_endpoints is installed
     """
-    skip_reason = ("Tests for the OpenAIChatService require the openai package to be installed, to install this run:\n"
-                   "`conda env update --solver=libmamba -n morpheus "
-                   "--file conda/environments/all_cuda-121_arch-x86_64.yaml --prune`")
-    yield import_or_skip("openai", reason=skip_reason, fail_missing=fail_missing)
+    yield import_or_skip("langchain_nvidia_ai_endpoints",
+                         reason=OPT_DEP_SKIP_REASON.format(package="langchain_nvidia_ai_endpoints"),
+                         fail_missing=fail_missing)
 
 
 @pytest.mark.usefixtures("openai")
diff --git a/tests/llm/conftest.py b/tests/llm/conftest.py
index 3519166635..94658863c5 100644
--- a/tests/llm/conftest.py
+++ b/tests/llm/conftest.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import types
 from unittest import mock
 
 import pytest
@@ -20,6 +21,54 @@
 
 from _utils import require_env_variable
 
+
+@pytest.fixture(name="nemollm", scope='session', autouse=True)
+def nemollm_fixture(nemollm: types.ModuleType):
+    """
+    Fixture to ensure nemollm is installed
+    """
+    yield nemollm
+
+
+@pytest.fixture(name="openai", scope='session', autouse=True)
+def openai_fixture(openai: types.ModuleType):
+    """
+    Fixture to ensure openai is installed
+    """
+    yield openai
+
+
+@pytest.fixture(name="langchain", scope='session', autouse=True)
+def langchain_fixture(langchain: types.ModuleType):
+    """
+    Fixture to ensure langchain is installed
+    """
+    yield langchain
+
+
+@pytest.fixture(name="langchain_core", scope='session', autouse=True)
+def langchain_core_fixture(langchain_core: types.ModuleType):
+    """
+    Fixture to ensure langchain_core is installed
+    """
+    yield langchain_core
+
+
+@pytest.fixture(name="langchain_community", scope='session', autouse=True)
+def langchain_community_fixture(langchain_community: types.ModuleType):
+    """
+    Fixture to ensure langchain_community is installed
+    """
+    yield langchain_community
+
+
+@pytest.fixture(name="langchain_nvidia_ai_endpoints", scope='session', autouse=True)
+def langchain_nvidia_ai_endpoints_fixture(langchain_nvidia_ai_endpoints: types.ModuleType):
+    """
+    Fixture to ensure langchain_nvidia_ai_endpoints is installed
+    """
+    yield langchain_nvidia_ai_endpoints
+
+
 @pytest.fixture(name="countries")
 def countries_fixture():
     yield [
diff --git a/tests/llm/nodes/test_langchain_agent_node.py b/tests/llm/nodes/test_langchain_agent_node.py
index 033e978402..0779b11604 100644
--- a/tests/llm/nodes/test_langchain_agent_node.py
+++ b/tests/llm/nodes/test_langchain_agent_node.py
@@ -19,14 +19,6 @@
 from unittest import mock
 
 import pytest
-from langchain.agents import AgentType
-from langchain.agents import Tool
-from langchain.agents import initialize_agent
-from langchain.callbacks.manager import AsyncCallbackManagerForToolRun
-from langchain.callbacks.manager import CallbackManagerForToolRun
-from langchain_community.chat_models.openai import ChatOpenAI
-from langchain_core.exceptions import OutputParserException
-from langchain_core.tools import BaseTool
 
 from _utils.llm import execute_node
 from _utils.llm import mk_mock_langchain_tool
@@ -34,6 +26,26 @@
 from morpheus_llm.llm import LLMNodeBase
 from morpheus_llm.llm.nodes.langchain_agent_node import LangChainAgentNode
 
+try:
+    from langchain.agents import AgentType
+    from langchain.agents import Tool
+    from langchain.agents import initialize_agent
+    from langchain.callbacks.manager import AsyncCallbackManagerForToolRun
+    from langchain.callbacks.manager import CallbackManagerForToolRun
+    from langchain_community.chat_models.openai import ChatOpenAI
+    from langchain_core.tools import BaseTool
+except ImportError:
+    pass
+
+
+class OutputParserExceptionStandin(Exception):
+    """
+    Stand-in for the OutputParserException class, to avoid importing it from langchain_core.exceptions.
+    OutputParserException instances need to appear in test parameters, but we don't want to import
+    langchain_core at the top of the test since it is an optional dependency.
+    """
+    pass
+
 
 def test_constructor(mock_agent_executor: mock.MagicMock):
     node = LangChainAgentNode(agent_executor=mock_agent_executor)
@@ -156,32 +168,6 @@ def test_execute_error(mock_chat_completion: tuple[mock.MagicMock, mock.MagicMoc
     assert isinstance(execute_node(node, input="input1"), RuntimeError)
 
 
-class MetadataSaverTool(BaseTool):
-    # The base class defines *args and **kwargs in the signature for _run and _arun requiring the arguments-differ
-    # pylint: disable=arguments-differ
-    name: str = "MetadataSaverTool"
-    description: str = "useful for when you need to know the name of a reptile"
-
-    saved_metadata: list[dict] = []
-
-    def _run(
-        self,
-        query: str,
-        run_manager: typing.Optional[CallbackManagerForToolRun] = None,
-    ) -> str:
-        raise NotImplementedError("This tool only supports async")
-
-    async def _arun(
-        self,
-        query: str,
-        run_manager: typing.Optional[AsyncCallbackManagerForToolRun] = None,
-    ) -> str:
-        assert query is not None  # avoiding unused-argument
-        assert run_manager is not None
-        self.saved_metadata.append(run_manager.metadata.copy())
-        return "frog"
-
-
 @pytest.mark.parametrize("metadata",
                          [{
                              "morpheus": "unittest"
@@ -192,6 +178,32 @@ async def _arun(
                          }],
                          ids=["single-metadata", "single-metadata-list", "multiple-metadata-list"])
 def test_metadata(mock_chat_completion: tuple[mock.MagicMock, mock.MagicMock], metadata: dict):
+
+    class MetadataSaverTool(BaseTool):
+        # The base class defines *args and **kwargs in the signature for _run and _arun requiring the arguments-differ
+        # pylint: disable=arguments-differ
+        name: str = "MetadataSaverTool"
+        description: str = "useful for when you need to know the name of a reptile"
+
+        saved_metadata: list[dict] = []
+
+        def _run(
+            self,
+            query: str,
+            run_manager: typing.Optional[CallbackManagerForToolRun] = None,
+        ) -> str:
+            raise NotImplementedError("This tool only supports async")
+
+        async def _arun(
+            self,
+            query: str,
+            run_manager: typing.Optional[AsyncCallbackManagerForToolRun] = None,
+        ) -> str:
+            assert query is not None  # avoiding unused-argument
+            assert run_manager is not None
+            self.saved_metadata.append(run_manager.metadata.copy())
+            return "frog"
+
     if isinstance(metadata['morpheus'], list):
         num_meta = len(metadata['morpheus'])
         input_data = [f"input_{i}" for i in range(num_meta)]
@@ -271,7 +283,7 @@ def mock_llm_chat(*_, messages, **__):
     "arun_return,replace_value,expected_output",
     [
         (
-            [[OutputParserException("Parsing Error"), "A valid result."]],
+            [[OutputParserExceptionStandin("Parsing Error"), "A valid result."]],
             "Default error message.",
             [["Default error message.", "A valid result."]],
         ),
@@ -282,7 +294,7 @@
         ),
         (
             [
-                ["A valid result.", OutputParserException("Parsing Error")],
+                ["A valid result.", OutputParserExceptionStandin("Parsing Error")],
                 [Exception("General error"), "Another valid result."],
             ],
             None,
@@ -297,6 +309,22 @@ def test_execute_replaces_exceptions(
     replace_value: str,
     expected_output: list,
 ):
+    # We couldn't import OutputParserException at the module level, so we need to replace instances of
+    # OutputParserExceptionStandin with OutputParserException
+    from langchain_core.exceptions import OutputParserException
+
+    arun_return_tmp = []
+    for values in arun_return:
+        values_tmp = []
+        for value in values:
+            if isinstance(value, OutputParserExceptionStandin):
+                values_tmp.append(OutputParserException(*value.args))
+            else:
+                values_tmp.append(value)
+        arun_return_tmp.append(values_tmp)
+
+    arun_return = arun_return_tmp
+
     placeholder_input_values = {"foo": "bar"}  # a non-empty placeholder input for the context
     mock_agent_executor.arun.return_value = arun_return
diff --git a/tests/llm/services/conftest.py b/tests/llm/services/conftest.py
index a802c6ec84..88f30e76ba 100644
--- a/tests/llm/services/conftest.py
+++ b/tests/llm/services/conftest.py
@@ -36,12 +36,12 @@ def openai_fixture(openai):
     yield openai
 
 
-@pytest.fixture(name="nvfoundationllm", autouse=True, scope='session')
-def nvfoundationllm_fixture(nvfoundationllm):
+@pytest.fixture(name="langchain_nvidia_ai_endpoints", autouse=True, scope='session')
+def langchain_nvidia_ai_endpoints_fixture(langchain_nvidia_ai_endpoints):
     """
-    All of the tests in this subdir require nvfoundationllm
+    All of the tests in this subdir require langchain_nvidia_ai_endpoints
     """
-    yield nvfoundationllm
+    yield langchain_nvidia_ai_endpoints
 
 
 @pytest.fixture(name="mock_chat_completion", autouse=True)
diff --git a/tests/llm/services/test_nvfoundation_llm_service.py b/tests/llm/services/test_nvfoundation_llm_service.py
index f139ddacde..35a6a66f2b 100644
--- a/tests/llm/services/test_nvfoundation_llm_service.py
+++ b/tests/llm/services/test_nvfoundation_llm_service.py
@@ -17,13 +17,17 @@
 from unittest import mock
 
 import pytest
-from langchain_core.messages import ChatMessage
-from langchain_core.outputs import ChatGeneration
-from langchain_core.outputs import LLMResult
 
 from morpheus_llm.llm.services.nvfoundation_llm_service import NVFoundationLLMClient
 from morpheus_llm.llm.services.nvfoundation_llm_service import NVFoundationLLMService
 
+try:
+    from langchain_core.messages import ChatMessage
+    from langchain_core.outputs import ChatGeneration
+    from langchain_core.outputs import LLMResult
+except ImportError:
+    pass
+
 
 @pytest.fixture(name="set_default_nvidia_api_key", autouse=True, scope="function")
 def set_default_nvidia_api_key_fixture():
@@ -34,7 +38,7 @@ def set_default_nvidia_api_key_fixture():
 
 @pytest.mark.parametrize("api_key", ["nvapi-12345", None])
 @pytest.mark.parametrize("base_url", ["http://test.nvidia.com/v1", None])
-def test_constructor(api_key: str, base_url: bool):
+def test_constructor(api_key: str | None, base_url: str | None):
 
     service = NVFoundationLLMService(api_key=api_key, base_url=base_url)
diff --git a/tests/llm/test_agents_simple_pipe.py b/tests/llm/test_agents_simple_pipe.py
index 61fa7f8d84..5d33dacb03 100644
--- a/tests/llm/test_agents_simple_pipe.py
+++ b/tests/llm/test_agents_simple_pipe.py
@@ -18,12 +18,6 @@
 from unittest import mock
 
 import pytest
-from langchain.agents import AgentType
-from langchain.agents import initialize_agent
-from langchain.agents import load_tools
-from langchain.agents.tools import Tool
-from langchain_community.llms import OpenAI  # pylint: disable=no-name-in-module
-from langchain_community.utilities import serpapi
 
 import cudf
 
@@ -41,6 +35,18 @@
 from morpheus_llm.llm.task_handlers.simple_task_handler import SimpleTaskHandler
 from morpheus_llm.stages.llm.llm_engine_stage import LLMEngineStage
 
+try:
+    from langchain.agents import AgentType
+    from langchain.agents import initialize_agent
+    from langchain.agents import load_tools
+    from langchain.agents.tools import Tool
+    from langchain.schema import Generation
+    from langchain.schema import LLMResult
+    from langchain_community.llms import OpenAI  # pylint: disable=no-name-in-module
+    from langchain_community.utilities import serpapi
+except ImportError:
+    pass
+
 
 @pytest.fixture(name="questions")
 def questions_fixture():
@@ -48,7 +54,6 @@ def questions_fixture():
 
 
 def _build_agent_executor(model_name: str):
-
     llm = OpenAI(model=model_name, temperature=0, cache=False)
 
     # Explicitly construct the serpapi tool, loading it via load_tools makes it too difficult to mock
@@ -132,9 +137,6 @@ def test_agents_simple_pipe(mock_openai_agenerate: mock.AsyncMock,
                             questions: list[str]):
     os.environ.update({'OPENAI_API_KEY': 'test_api_key', 'SERPAPI_API_KEY': 'test_api_key'})
 
-    from langchain.schema import Generation
-    from langchain.schema import LLMResult
-
     assert serpapi.SerpAPIWrapper().aresults is mock_serpapi_aresults
 
     model_name = "test_model"
diff --git a/tests/utils/test_shared_process_pool.py b/tests/utils/test_shared_process_pool.py
index 788dfab1da..0e5e5a0b82 100644
--- a/tests/utils/test_shared_process_pool.py
+++ b/tests/utils/test_shared_process_pool.py
@@ -93,6 +93,7 @@ def test_singleton():
     assert pool_1 is pool_2
 
 
+@pytest.mark.slow
 def test_pool_status(shared_process_pool):
 
     pool = shared_process_pool
@@ -125,6 +126,7 @@ def test_pool_status(shared_process_pool):
     assert not pool._task_queues
 
 
+@pytest.mark.slow
 @pytest.mark.parametrize(
     "a, b, expected",
     [
@@ -157,6 +159,7 @@ def test_submit_single_task(shared_process_pool, a, b, expected):
         pool.submit_task("test_stage", _add_task, 10, 20)
 
 
+@pytest.mark.slow
 def test_submit_task_with_invalid_stage(shared_process_pool):
 
     pool = shared_process_pool
@@ -165,6 +168,7 @@ def test_submit_task_with_invalid_stage(shared_process_pool):
         pool.submit_task("stage_does_not_exist", _add_task, 10, 20)
 
 
+@pytest.mark.slow
 def test_submit_task_raises_exception(shared_process_pool):
 
     pool = shared_process_pool
@@ -175,6 +179,7 @@ def test_submit_task_raises_exception(shared_process_pool):
         task.result()
 
 
+@pytest.mark.slow
 def test_submit_task_with_unserializable_result(shared_process_pool):
 
     pool = shared_process_pool
@@ -185,6 +190,7 @@ def test_submit_task_with_unserializable_result(shared_process_pool):
         task.result()
 
 
+@pytest.mark.slow
 def test_submit_task_with_unserializable_arg(shared_process_pool):
 
     pool = shared_process_pool
@@ -195,6 +201,7 @@ def test_submit_task_with_unserializable_arg(shared_process_pool):
     pool.submit_task("test_stage", _arbitrary_function, threading.Lock())
 
 
+@pytest.mark.slow
 @pytest.mark.parametrize(
     "a, b, expected",
     [
@@ -220,6 +227,7 @@ def test_submit_multiple_tasks(shared_process_pool, a, b, expected):
         assert future.result() == expected
 
 
+@pytest.mark.slow
 def test_set_usage(shared_process_pool):
 
     pool = shared_process_pool
@@ -256,6 +264,7 @@ def test_set_usage(shared_process_pool):
     assert pool._total_usage == 0.9
 
 
+@pytest.mark.slow
 def test_task_completion_with_early_stop(shared_process_pool):
 
     pool = shared_process_pool
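
Reviewer note: every file above applies the same guarded optional-import idiom. Below is a minimal, self-contained sketch of that pattern for reference; the `nemollm` package name and the message text mirror this diff, while `ExampleClient` is a hypothetical consumer used purely for illustration and is not part of the patch.

    # Sketch of the guarded optional-import pattern standardized by this PR.
    # (Assumption: ExampleClient is illustrative only; the message matches error.py.)
    IMPORT_ERROR_MESSAGE = (
        "{package} not found. Install it and additional dependencies by running the following command:\n"
        "`conda env update --solver=libmamba -n morpheus "
        "--file conda/environments/examples_cuda-121_arch-x86_64.yaml`")

    IMPORT_EXCEPTION = None

    try:
        import nemollm  # noqa: F401 -- optional dependency
    except ImportError as import_exc:
        # Record the failure instead of raising, so this module can always be imported.
        IMPORT_EXCEPTION = import_exc


    class ExampleClient:
        """Hypothetical consumer used only to illustrate the deferred failure."""

        def __init__(self) -> None:
            # Raise only when the optional dependency is actually needed, chaining
            # the original exception so the root cause stays visible in the traceback.
            if IMPORT_EXCEPTION is not None:
                raise ImportError(IMPORT_ERROR_MESSAGE.format(package='nemollm')) from IMPORT_EXCEPTION

This defers the ImportError from import time to first use, which is what lets test modules and pipelines that never touch the optional service run in environments where the package is absent.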