enable config through .env
@@ -31,70 +31,98 @@ KH_VECTORSTORE = {
     "__type__": "kotaemon.storages.ChromaVectorStore",
     "path": str(user_cache_dir / "vectorstore"),
 }
-KH_LLMS = {
-    # example for using Azure OpenAI, the config variables can set as environment
-    # variables or in the .env file
-    # "gpt4": {
-    #     "def": {
-    #         "__type__": "kotaemon.llms.AzureChatOpenAI",
-    #         "temperature": 0,
-    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-    #         "openai_api_version": config("OPENAI_API_VERSION", default=""),
-    #         "deployment_name": "<your deployment name>",
-    #         "stream": True,
-    #     },
-    #     "accuracy": 10,
-    #     "cost": 10,
-    #     "default": False,
-    # },
-    # "gpt35": {
-    #     "def": {
-    #         "__type__": "kotaemon.llms.AzureChatOpenAI",
-    #         "temperature": 0,
-    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-    #         "openai_api_version": config("OPENAI_API_VERSION", default=""),
-    #         "deployment_name": "<your deployment name>",
-    #         "request_timeout": 10,
-    #         "stream": False,
-    #     },
-    #     "accuracy": 5,
-    #     "cost": 5,
-    #     "default": False,
-    # },
-    "local": {
+KH_LLMS = {}
+KH_EMBEDDINGS = {}
+
+# populate options from config
+if config("AZURE_OPENAI_API_KEY", default="") and config(
+    "AZURE_OPENAI_ENDPOINT", default=""
+):
+    if config("AZURE_OPENAI_CHAT_DEPLOYMENT", default=""):
+        KH_LLMS["azure"] = {
+            "def": {
+                "__type__": "kotaemon.llms.AzureChatOpenAI",
+                "temperature": 0,
+                "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
+                "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
+                "api_version": config("OPENAI_API_VERSION", default="")
+                or "2024-02-15-preview",
+                "deployment_name": config("AZURE_OPENAI_CHAT_DEPLOYMENT", default=""),
+                "request_timeout": 10,
+                "stream": False,
+            },
+            "default": False,
+            "accuracy": 5,
+            "cost": 5,
+        }
+    if config("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", default=""):
+        KH_EMBEDDINGS["azure"] = {
+            "def": {
+                "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
+                "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
+                "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
+                "api_version": config("OPENAI_API_VERSION", default="")
+                or "2024-02-15-preview",
+                "deployment": config("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", default=""),
+                "request_timeout": 10,
+                "chunk_size": 16,
+            },
+            "default": False,
+            "accuracy": 5,
+            "cost": 5,
+        }
+
+if config("OPENAI_API_KEY", default=""):
+    KH_LLMS["openai"] = {
+        "def": {
+            "__type__": "kotaemon.llms.ChatOpenAI",
+            "temperature": 0,
+            "openai_api_base": config("OPENAI_API_BASE", default="")
+            or "https://api.openai.com/v1",
+            "openai_api_key": config("OPENAI_API_KEY", default=""),
+            "model": config("OPENAI_CHAT_MODEL", default="") or "gpt-3.5-turbo",
+            "request_timeout": 10,
+            "stream": False,
+        },
+        "default": False,
+    }
+    if len(KH_EMBEDDINGS) < 1:
+        KH_EMBEDDINGS["openai"] = {
+            "def": {
+                "__type__": "kotaemon.embeddings.OpenAIEmbeddings",
+                "openai_api_base": config("OPENAI_API_BASE", default="")
+                or "https://api.openai.com/v1",
+                "openai_api_key": config("OPENAI_API_KEY", default=""),
+                "model": config(
+                    "OPENAI_EMBEDDINGS_MODEL", default="text-embedding-ada-002"
+                )
+                or "text-embedding-ada-002",
+                "request_timeout": 10,
+                "chunk_size": 16,
+            },
+            "default": False,
+        }
+
+if config("LOCAL_MODEL", default=""):
+    KH_LLMS["local"] = {
         "def": {
             "__type__": "kotaemon.llms.EndpointChatLLM",
             "endpoint_url": "http://localhost:31415/v1/chat/completions",
         },
         "default": False,
-    },
-}
-KH_EMBEDDINGS = {
-    # example for using Azure OpenAI, the config variables can set as environment
-    # variables or in the .env file
-    # "ada": {
-    #     "def": {
-    #         "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
-    #         "model": "text-embedding-ada-002",
-    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-    #         "deployment": "<your deployment name>",
-    #         "chunk_size": 16,
-    #     },
-    #     "accuracy": 5,
-    #     "cost": 5,
-    #     "default": True,
-    # },
-    "local": {
-        "def": {
-            "__type__": "kotaemon.embeddings.EndpointEmbeddings",
-            "endpoint_url": "http://localhost:31415/v1/embeddings",
-        },
-        "default": False,
-    },
-}
+        "cost": 0,
+    }
+    if len(KH_EMBEDDINGS) < 1:
+        KH_EMBEDDINGS["local"] = {
+            "def": {
+                "__type__": "kotaemon.embeddings.EndpointEmbeddings",
+                "endpoint_url": "http://localhost:31415/v1/embeddings",
+            },
+            "default": False,
+            "cost": 0,
+        }
+
+
 KH_REASONINGS = ["ktem.reasoning.simple.FullQAPipeline"]


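Effect of the change: LLM and embedding providers are now registered only when the matching variables are set in the environment or in a .env file, instead of being hard-coded or left as commented-out examples in the settings module. The sketch below illustrates the mechanism for the OpenAI branch; it assumes `config` here is python-decouple's `config` (the `config("NAME", default="")` call style matches that API), and the .env values shown are purely illustrative, not part of the commit.

# Minimal sketch of the .env-driven population, assuming python-decouple.
# Illustrative .env next to the application:
#   OPENAI_API_KEY=sk-xxxx
#   OPENAI_CHAT_MODEL=
from decouple import config

KH_LLMS = {}

if config("OPENAI_API_KEY", default=""):  # truthy only when the key is set
    KH_LLMS["openai"] = {
        "def": {
            "__type__": "kotaemon.llms.ChatOpenAI",
            "openai_api_key": config("OPENAI_API_KEY", default=""),
            # `config(...) or fallback` also covers variables that are present
            # but left empty in .env, which `default=` alone would not.
            "model": config("OPENAI_CHAT_MODEL", default="") or "gpt-3.5-turbo",
        },
        "default": False,
    }

print(sorted(KH_LLMS))  # ['openai'] when OPENAI_API_KEY is set, [] otherwise

The Azure and local branches in the diff follow the same pattern: AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT together with a chat or embeddings deployment name enable the "azure" entries, and LOCAL_MODEL enables the endpoint-based "local" entries on http://localhost:31415.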