Merge branch 'mrh-online-dev' of github.com:BRUNIX-AI/assistance-engine into mrh-online-dev

This commit is contained in:
pseco 2026-03-03 15:07:58 +01:00
commit 89316a9f6b
10 changed files with 1465 additions and 1020 deletions

View File

@ -1,81 +0,0 @@
from pathlib import Path
from pydantic_settings import BaseSettings, SettingsConfigDict
from dotenv import load_dotenv
from datetime import timedelta
import warnings
import os

# Pull variables from a local .env file into the process environment so the
# os.getenv() lookups below can resolve them.
load_dotenv()

# --- LLM provider configuration ---------------------------------------------
MODEL_NAME = os.getenv("MODEL_NAME", "gpt-3.5-turbo")
# SECURITY: the previous revision shipped a real OpenAI service-account key as
# the fallback default. A credential committed to source control must be
# treated as compromised — rotate it. No default here: resolves to None when
# the environment variable is unset, which surfaces misconfiguration instead
# of silently using a leaked key.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# --- Ollama (local model server) --------------------------------------------
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://host.docker.internal:11434")
OLLAMA_LOCAL_URL = os.getenv("OLLAMA_LOCAL_URL", "http://localhost:11434")
OLLAMA_MODEL_NAME = os.getenv("OLLAMA_MODEL_NAME", "qwen3-0.6B:latest")
OLLAMA_EMB_MODEL_NAME = os.getenv("OLLAMA_EMB_MODEL_NAME", "qwen3-0.6B-emb:latest")

# --- Langfuse observability -------------------------------------------------
LANGFUSE_HOST = os.getenv("LANGFUSE_HOST", "http://45.77.119.180")
# NOTE(review): the public key is less sensitive than the secret key, but an
# instance-specific default committed here still leaks deployment details —
# consider moving it to .env as well.
LANGFUSE_PUBLIC_KEY = os.getenv("LANGFUSE_PUBLIC_KEY", "pk-lf-0e6db694-3e95-4dd4-aedf-5a2694267058")
# SECURITY: hardcoded secret key removed — rotate the leaked credential.
LANGFUSE_SECRET_KEY = os.getenv("LANGFUSE_SECRET_KEY")

# --- Elasticsearch ------------------------------------------------------------
ELASTICSEARCH_URL = os.getenv("ELASTICSEARCH_URL", "http://host.docker.internal:9200")
ELASTICSEARCH_LOCAL_URL = os.getenv("ELASTICSEARCH_LOCAL_URL", "http://localhost:9200")
ELASTICSEARCH_INDEX = os.getenv("ELASTICSEARCH_INDEX", "avap-docs-test")

# --- Database / cluster -------------------------------------------------------
# NOTE(review): the default connection string embeds a password; acceptable
# only if it is a throwaway local dev credential — confirm.
DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://postgres:brunix_pass@host.docker.internal:5432/postgres")
KUBECONFIG_PATH = os.getenv("KUBECONFIG_PATH", "kubernetes/kubeconfig.yaml")

# --- Hugging Face -------------------------------------------------------------
# SECURITY: hardcoded HF access token removed — revoke/rotate the leaked token.
HF_TOKEN = os.getenv("HF_TOKEN")
HF_EMB_MODEL_NAME = os.getenv("HF_EMB_MODEL_NAME", "Qwen/Qwen3-Embedding-0.6B")

# --- Data directory layout ----------------------------------------------------
# NOTE(review): these defaults point at one developer's home directory and are
# not portable; deployments should override them via MRH_AVAP_* env vars
# (see the Settings class), which is presumably the intended mechanism.
MRH_AVAP_DATA_PATH_ = os.getenv("MRH_AVAP_DATA_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/")
MRH_AVAP_MODELS_PATH_ = os.getenv("MRH_AVAP_MODELS_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/models")
MRH_AVAP_RAW_PATH_ = os.getenv("MRH_AVAP_RAW_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/raw")
MRH_AVAP_PROCESSED_PATH_ = os.getenv("MRH_AVAP_PROCESSED_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/processed")
MRH_AVAP_INTERIM_PATH_ = os.getenv("MRH_AVAP_INTERIM_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/interim")
MRH_AVAP_EXTERNAL_PATH_ = os.getenv("MRH_AVAP_EXTERNAL_PATH_", "/home/pseco/VsCodeProjects/assistance-engine/data/external")
class Settings(BaseSettings):
    """Typed project settings resolved by pydantic-settings.

    Values come from environment variables prefixed ``mrh_avap_`` (case
    insensitive) or from a local ``.env`` file; unknown keys are ignored.
    The raw ``*_path_`` fields store the configured strings, and each has a
    companion property exposing it as a :class:`pathlib.Path`.
    """

    # Raw string values populated from MRH_AVAP_*_PATH_ environment variables.
    raw_path_: str
    processed_path_: str
    models_path_: str
    interim_path_: str
    external_path_: str

    model_config = SettingsConfigDict(
        env_prefix="mrh_avap_",
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
        extra="ignore",
    )

    @property
    def proj_root(self) -> Path:
        """Project root directory, derived from this file's location."""
        return Path(__file__).resolve().parents[1]

    @property
    def raw_path(self) -> Path:
        """Raw-data directory as a Path."""
        return Path(self.raw_path_)

    @property
    def processed_path(self) -> Path:
        """Processed-data directory as a Path."""
        return Path(self.processed_path_)

    @property
    def interim_path(self) -> Path:
        """Interim-data directory as a Path."""
        return Path(self.interim_path_)

    @property
    def external_path(self) -> Path:
        """External-data directory as a Path."""
        return Path(self.external_path_)

    @property
    def models_path(self) -> Path:
        """Model-artifacts directory as a Path."""
        return Path(self.models_path_)


# Module-level singleton; importing this module triggers env/.env resolution.
settings = Settings()

View File

@ -13,7 +13,7 @@ services:
LANGFUSE_HOST: ${LANGFUSE_HOST} LANGFUSE_HOST: ${LANGFUSE_HOST}
LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY}
LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY}
OLLAMA_URL: ${OLLAMA_LOCAL_URL} OLLAMA_URL: ${OLLAMA_URL}
OLLAMA_MODEL_NAME: ${OLLAMA_MODEL_NAME} OLLAMA_MODEL_NAME: ${OLLAMA_MODEL_NAME}
OLLAMA_EMB_MODEL_NAME: ${OLLAMA_EMB_MODEL_NAME} OLLAMA_EMB_MODEL_NAME: ${OLLAMA_EMB_MODEL_NAME}

View File

@ -1,24 +0,0 @@
import os
from langchain_elasticsearch import ElasticsearchStore
from utils.llm_factory import create_chat_model
from utils.emb_factory import create_embedding_model

# Chat model backed by a local Ollama server. Temperature 0 keeps answers
# deterministic, and the model name is validated eagerly so a missing model
# fails at startup rather than on the first request.
_chat_model_name = os.getenv("OLLAMA_MODEL_NAME")
llm = create_chat_model(
    provider="ollama",
    model=_chat_model_name,
    temperature=0,
    validate_model_on_init=True,
)

# Embedding model from the same Ollama provider, used to vectorize documents
# and queries for the store below.
_emb_model_name = os.getenv("OLLAMA_EMB_MODEL_NAME")
embeddings = create_embedding_model(
    provider="ollama",
    model=_emb_model_name,
)

# Elasticsearch-backed vector store; documents keep their raw content in the
# "text" field and their embedding in the "vector" field.
vector_store = ElasticsearchStore(
    es_url=os.getenv("ELASTICSEARCH_URL"),
    index_name=os.getenv("ELASTICSEARCH_INDEX"),
    embedding=embeddings,
    query_field="text",
    vector_query_field="vector",
)

View File

@ -1,46 +1,34 @@
import logging import logging
import os import os
import sys
from concurrent import futures from concurrent import futures
from pathlib import Path
from typing import Any
import brunix_pb2 import brunix_pb2
import brunix_pb2_grpc import brunix_pb2_grpc
import grpc import grpc
from grpc_reflection.v1alpha import reflection from grpc_reflection.v1alpha import reflection
from langchain_core.prompts import ChatPromptTemplate
from langchain_elasticsearch import ElasticsearchStore from langchain_elasticsearch import ElasticsearchStore
from utils.llm_factory import create_chat_model from utils.llm_factory import create_chat_model
from utils.emb_factory import create_embedding_model from utils.emb_factory import create_embedding_model
from graph import build_graph from graph import build_graph
# PROJECT_ROOT = Path(__file__).resolve().parents[2]
# if str(PROJECT_ROOT) not in sys.path:
# sys.path.insert(0, str(PROJECT_ROOT))
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("brunix-engine") logger = logging.getLogger("brunix-engine")
def _provider_kwargs(provider: str, base_url: str) -> dict[str, Any]:
if provider == "ollama":
return {"base_url": base_url}
return {}
class BrunixEngine(brunix_pb2_grpc.AssistanceEngineServicer): class BrunixEngine(brunix_pb2_grpc.AssistanceEngineServicer):
def __init__(self): def __init__(self):
self.llm = create_chat_model( self.llm = create_chat_model(
provider="ollama", provider="ollama",
model=os.getenv("OLLAMA_MODEL_NAME"), model=os.getenv("OLLAMA_MODEL_NAME"),
base_url=os.getenv("OLLAMA_URL"),
temperature=0, temperature=0,
validate_model_on_init=True, validate_model_on_init=True,
) )
self.embeddings = create_embedding_model( self.embeddings = create_embedding_model(
provider="ollama", provider="ollama",
model=os.getenv("OLLAMA_EMB_MODEL_NAME"), model=os.getenv("OLLAMA_EMB_MODEL_NAME"),
base_url=os.getenv("OLLAMA_URL"),
) )
self.vector_store = ElasticsearchStore( self.vector_store = ElasticsearchStore(
es_url=os.getenv("ELASTICSEARCH_URL"), es_url=os.getenv("ELASTICSEARCH_URL"),

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

0
src/utils/__init__.py Normal file
View File