{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "66cbbaf8",
   "metadata": {},
   "source": [
    "# Libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c01c19dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "from typing import Dict, List, Union\n",
    "import numpy as np\n",
    "from datasets import load_dataset\n",
    "from langchain_ollama import OllamaEmbeddings\n",
    "from beir.datasets.data_loader import GenericDataLoader\n",
    "from beir.retrieval.search.dense import DenseRetrievalExactSearch\n",
    "from beir.retrieval.evaluation import EvaluateRetrieval\n",
    "from beir import util"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ac011c1c",
   "metadata": {},
   "source": [
    "# Utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b83e7900",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BEIROllamaEmbeddings:\n",
    "    \"\"\"\n",
    "    Adapter that makes LangChain's OllamaEmbeddings compatible with BEIR's\n",
    "    dense-retrieval model interface (encode_queries / encode_corpus).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        base_url: str,\n",
    "        model: str,\n",
    "        batch_size: int = 64,\n",
    "    ) -> None:\n",
    "        # Number of texts sent to Ollama per embed_documents() call.\n",
    "        self.batch_size = batch_size\n",
    "        self.embeddings = OllamaEmbeddings(\n",
    "            base_url=base_url,\n",
    "            model=model,\n",
    "        )\n",
    "\n",
    "    def _batch_embed(self, texts: List[str]) -> np.ndarray:\n",
    "        \"\"\"Embed `texts` in chunks of `batch_size`; returns a float32 array.\"\"\"\n",
    "        vectors = []\n",
    "\n",
    "        for i in range(0, len(texts), self.batch_size):\n",
    "            batch = texts[i : i + self.batch_size]\n",
    "            batch_vectors = self.embeddings.embed_documents(batch)\n",
    "            vectors.extend(batch_vectors)\n",
    "\n",
    "        return np.asarray(vectors, dtype=np.float32)\n",
    "\n",
    "    def encode_queries(self, queries: List[str], **kwargs) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        BEIR query encoder\n",
    "        \"\"\"\n",
    "        return self._batch_embed(queries)\n",
    "\n",
    "    def encode_corpus(\n",
    "        self,\n",
    "        corpus: Union[List[Dict[str, str]], Dict[str, Dict[str, str]]],\n",
    "        **kwargs,\n",
    "    ) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        BEIR corpus encoder: joins title and text with a newline when a\n",
    "        non-empty title is present, otherwise embeds the text alone.\n",
    "        \"\"\"\n",
    "        # BEIR may pass the corpus as {doc_id: {\"title\": ..., \"text\": ...}}.\n",
    "        if isinstance(corpus, dict):\n",
    "            corpus = list(corpus.values())\n",
    "\n",
    "        texts = []\n",
    "        for doc in corpus:\n",
    "            title = (doc.get(\"title\") or \"\").strip()\n",
    "            text = (doc.get(\"text\") or \"\").strip()\n",
    "\n",
    "            if title:\n",
    "                texts.append(f\"{title}\\n{text}\")\n",
    "            else:\n",
    "                texts.append(text)\n",
    "\n",
    "        return self._batch_embed(texts)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c9528fb6",
   "metadata": {},
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "230aae25",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): the original source of this cell (and any cells after it)\n",
    "# was lost in an unresolved git merge conflict\n",
    "# (\">>>>>>> 4b5352d93cf89b7562895b550fb5bd62160586c5\") that corrupted the\n",
    "# notebook JSON. Its captured stdout indicated it downloaded data from\n",
    "# Hugging Face and loaded it with BEIR's GenericDataLoader; the corrupted\n",
    "# outputs have been cleared. Reconstruct that logic here (e.g. fetch the\n",
    "# dataset, write it to disk, then GenericDataLoader(data_folder=...).load())."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "assistance-engine",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}