llm-qa/llm_qa/dependencies.py

from typing import Annotated

from fastapi import Depends
from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.chat_models.ollama import ChatOllama

from llm_qa.models.tei import TeiConfig
from llm_qa.settings import Settings

def settings() -> Settings:
    """Application settings dependency."""
    return Settings()


def tei_config(settings: Annotated[Settings, Depends(settings)]) -> TeiConfig:
    """TEI embedding service configuration, including the prefixes
    prepended to documents and queries before embedding."""
    return TeiConfig(
        base_url=settings.tei_base_url,
        document_prefix=settings.tei_document_prefix,
        query_prefix=settings.tei_query_prefix,
    )


def tei_rerank_config(
    settings: Annotated[Settings, Depends(settings)],
) -> TeiConfig:
    """TEI reranking service configuration."""
    return TeiConfig(
        base_url=settings.tei_rerank_base_url,
    )


def chat_model(
    settings: Annotated[Settings, Depends(settings)],
) -> BaseChatModel:
    """Ollama-backed chat model; the callback handler streams generated
    tokens to stdout."""
    return ChatOllama(
        base_url=settings.ollama_base_url,
        model=settings.ollama_model_name,
        verbose=True,
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    )
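
A minimal usage sketch, not part of the file above: it shows how these dependency providers would typically be consumed in a FastAPI route via Depends. The /chat route, the ChatRequest model, and the handler body are hypothetical illustrations, assuming the module is importable as llm_qa.dependencies.

from typing import Annotated

from fastapi import Depends, FastAPI
from langchain.chat_models.base import BaseChatModel
from pydantic import BaseModel

from llm_qa.dependencies import chat_model

app = FastAPI()


class ChatRequest(BaseModel):
    message: str


@app.post("/chat")
async def chat(
    request: ChatRequest,
    # FastAPI resolves the chat_model dependency (and, transitively,
    # settings) once per request.
    model: Annotated[BaseChatModel, Depends(chat_model)],
) -> str:
    # ainvoke accepts a plain string; the returned message's .content
    # holds the model's reply text.
    response = await model.ainvoke(request.message)
    return str(response.content)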