# NOTE: pasted file-viewer metadata ("20 lines / 548 B / Python") removed.
"""Wire an Ollama LLM and embedding model to an Elasticsearch vector store.

Flat setup script: instantiates the Ollama chat/embedding clients, builds an
``ElasticsearchStore`` over the ``avap_manuals`` index, and prints the store
object as a quick sanity check. No retrieval or generation happens here.
"""

from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms import Ollama
from langchain_elasticsearch import ElasticsearchStore

# Elasticsearch endpoint — assumes a local single-node instance; confirm
# against the deployment environment.
es_url = "http://localhost:9200"

# Ollama service endpoint (in-cluster service name) and generation model.
base_url = "http://ollama-light-service:11434"
model_name = "qwen2.5:1.5b"

# Plain string: the original used an f-string with no placeholders.
print("Starting server")

# NOTE(review): langchain_community's Ollama / OllamaEmbeddings are
# deprecated upstream in favor of the separate `langchain-ollama` package —
# consider migrating when dependencies can change.
llm = Ollama(base_url=base_url, model=model_name)
embeddings = OllamaEmbeddings(base_url=base_url, model="nomic-embed-text")

# Vector store over the "avap_manuals" index; documents are embedded with
# the Ollama embedding model above at index/query time.
vector_store = ElasticsearchStore(
    es_url=es_url, index_name="avap_manuals", embedding=embeddings
)

print(vector_store)