kotaemon/tests/test_llms_completion_models.py
Nguyen Trung Duc (john) d79b3744cb Simplify the BaseComponent interface (#64)
This change remove `BaseComponent`'s:

- run_raw
- run_batch_raw
- run_document
- run_batch_document
- is_document
- is_batch

Each component is expected to support multiple types of inputs and a single type of output. Since we want the component to work out-of-the-box with both standardized and customized use cases, supporting multiple types of inputs is expected. At the same time, to reduce the complexity of understanding how to use a component, we restrict a component to only have a single output type.

To accommodate these changes, we also refactor some components to remove their run_raw, run_batch_raw... methods, and to decide the common output interface for those components.

Tests are updated accordingly.

Commit changes:

* Add kwargs to vector store's query
* Simplify the BaseComponent
* Update tests
* Remove support for Python 3.8 and 3.9
* Bump version 0.3.0
* Fix github PR caching still use old environment after bumping version

---------

Co-authored-by: ian <ian@cinnamon.is>
2023-11-13 15:10:18 +07:00

74 lines
2.2 KiB
Python

from unittest.mock import patch
from langchain.llms import AzureOpenAI as AzureOpenAILC
from langchain.llms import OpenAI as OpenAILC
from openai.types.completion import Completion
from kotaemon.llms.base import LLMInterface
from kotaemon.llms.completions.openai import AzureOpenAI, OpenAI
# Canned payload mimicking the JSON body of an OpenAI text-completion response.
# Used as the return value of the patched SDK call in the tests below.
_completion_payload = {
    "id": "cmpl-7qyNoIo6gRSCJR0hi8o3ZKBH4RkJ0",
    "object": "text_completion",
    "created": 1392751226,
    "model": "gpt-35-turbo",
    "system_fingerprint": None,
    "choices": [
        {
            "text": "completion",
            "index": 0,
            "finish_reason": "length",
            "logprobs": None,
        }
    ],
    "usage": {"completion_tokens": 20, "prompt_tokens": 2, "total_tokens": 22},
}

# Validate the payload into the SDK's Completion model once at import time.
_openai_completion_response = Completion.parse_obj(_completion_payload)
@patch(
    "openai.resources.completions.Completions.create",
    side_effect=lambda *args, **kwargs: _openai_completion_response,
)
def test_azureopenai_model(openai_completion):
    """AzureOpenAI wrapper should delegate to Langchain and emit LLMInterface."""
    config = dict(
        openai_api_base="https://test.openai.azure.com/",
        openai_api_key="some-key",
        openai_api_version="2023-03-15-preview",
        deployment_name="gpt35turbo",
        temperature=0,
        request_timeout=60,
    )
    model = AzureOpenAI(**config)

    # The kotaemon wrapper is expected to hold a Langchain client as its agent.
    wrapped = isinstance(model.agent, AzureOpenAILC)
    assert wrapped, "Agent not wrapped in Langchain's AzureOpenAI"

    # Calling with a single string must produce the standardized output type.
    output = model("hello world")
    is_interface = isinstance(output, LLMInterface)
    assert is_interface, "Output for single text is not LLMInterface"
@patch(
    "openai.resources.completions.Completions.create",
    side_effect=lambda *args, **kwargs: _openai_completion_response,
)
def test_openai_model(openai_completion):
    """OpenAI wrapper should delegate to Langchain and emit LLMInterface."""
    config = dict(
        openai_api_base="https://test.openai.azure.com/",
        openai_api_key="some-key",
        openai_api_version="2023-03-15-preview",
        deployment_name="gpt35turbo",
        temperature=0,
        request_timeout=60,
    )
    model = OpenAI(**config)

    # The kotaemon wrapper is expected to hold a Langchain client as its agent.
    wrapped = isinstance(model.agent, OpenAILC)
    assert wrapped, "Agent is not wrapped in Langchain's OpenAI"

    # Calling with a single string must produce the standardized output type.
    output = model("hello world")
    is_interface = isinstance(output, LLMInterface)
    assert is_interface, "Output for single text is not LLMInterface"