Update knowledge_base.py
knowledge_base.py  CHANGED  (+21 -4)
@@ -1,6 +1,6 @@
-# knowledge_base.py
 import os
 import fitz  # PyMuPDF
+import requests
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
@@ -9,28 +9,45 @@ from langchain.docstore.document import Document
 CHROMA_DIR = "chroma"
 MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
 
+# Set this to your actual file on HF
+HF_FILE_URL = "https://huggingface.co/spaces/DurgaDeepak/eat2fit/resolve/main/meal_plans/Lafayette%2C%20Natasha%20-%20Fit%20By%20Tasha%20High%20Protein%20Recipes%20_%2052%20High%20Protein%20Clean%20Recipes%20%26%20Meal%20Plan%20(2021).pdf"
+
+def ensure_pdf_downloaded(local_path: str, url: str):
+    if not os.path.exists(local_path):
+        print(f"Downloading large PDF from: {url}")
+        response = requests.get(url)
+        if response.status_code == 200:
+            with open(local_path, "wb") as f:
+                f.write(response.content)
+            print("PDF downloaded successfully.")
+        else:
+            raise RuntimeError(f"Failed to download PDF: {response.status_code}")
 
 def load_and_chunk_pdfs(folder_path):
     documents = []
+
     for filename in os.listdir(folder_path):
         if filename.endswith(".pdf"):
             path = os.path.join(folder_path, filename)
+
+            # Try downloading the file if it's missing or an LFS pointer
+            if os.path.getsize(path) < 1000:  # LFS pointer files are tiny
+                ensure_pdf_downloaded(path, HF_FILE_URL)
+
             doc = fitz.open(path)
-            text = "\n".join(page.get_text() for page in doc)
+            text = "\n".join(page.get_text() for page in doc if page.get_text())
             documents.append(Document(page_content=text, metadata={"source": filename}))
 
     splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
     chunks = splitter.split_documents(documents)
     return chunks
 
-
 def create_vectorstore(chunks):
     embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
     db = Chroma.from_documents(chunks, embeddings, persist_directory=CHROMA_DIR)
     db.persist()
     return db
 
-
 def load_vectorstore():
     embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
     return Chroma(persist_directory=CHROMA_DIR, embedding_function=embeddings)
Resulting knowledge_base.py after this commit:

import os
import fitz  # PyMuPDF
import requests
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document

CHROMA_DIR = "chroma"
MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"

# Set this to your actual file on HF
HF_FILE_URL = "https://huggingface.co/spaces/DurgaDeepak/eat2fit/resolve/main/meal_plans/Lafayette%2C%20Natasha%20-%20Fit%20By%20Tasha%20High%20Protein%20Recipes%20_%2052%20High%20Protein%20Clean%20Recipes%20%26%20Meal%20Plan%20(2021).pdf"

def ensure_pdf_downloaded(local_path: str, url: str):
    if not os.path.exists(local_path):
        print(f"Downloading large PDF from: {url}")
        response = requests.get(url)
        if response.status_code == 200:
            with open(local_path, "wb") as f:
                f.write(response.content)
            print("PDF downloaded successfully.")
        else:
            raise RuntimeError(f"Failed to download PDF: {response.status_code}")

def load_and_chunk_pdfs(folder_path):
    documents = []

    for filename in os.listdir(folder_path):
        if filename.endswith(".pdf"):
            path = os.path.join(folder_path, filename)

            # Try downloading the file if it's missing or an LFS pointer
            if os.path.getsize(path) < 1000:  # LFS pointer files are tiny
                ensure_pdf_downloaded(path, HF_FILE_URL)

            doc = fitz.open(path)
            text = "\n".join(page.get_text() for page in doc if page.get_text())
            documents.append(Document(page_content=text, metadata={"source": filename}))

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(documents)
    return chunks

def create_vectorstore(chunks):
    embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
    db = Chroma.from_documents(chunks, embeddings, persist_directory=CHROMA_DIR)
    db.persist()
    return db

def load_vectorstore():
    embeddings = HuggingFaceEmbeddings(model_name=MODEL_NAME)
    return Chroma(persist_directory=CHROMA_DIR, embedding_function=embeddings)
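For reference, a minimal sketch of how these helpers could be wired together by the Space's app code. The meal_plans folder name is taken from the path in HF_FILE_URL above; the bootstrap logic and the example query are illustrative assumptions, not part of this commit.

# Hypothetical bootstrap (illustration only): reuse the persisted Chroma index
# if it exists, otherwise chunk the PDFs and build it once.
import os

from knowledge_base import CHROMA_DIR, create_vectorstore, load_and_chunk_pdfs, load_vectorstore

PDF_DIR = "meal_plans"  # assumed folder; matches the path segment in HF_FILE_URL

if os.path.isdir(CHROMA_DIR) and os.listdir(CHROMA_DIR):
    db = load_vectorstore()
else:
    chunks = load_and_chunk_pdfs(PDF_DIR)  # re-downloads the PDF if only an LFS pointer is on disk
    db = create_vectorstore(chunks)

# Query the index with Chroma's similarity_search.
for doc in db.similarity_search("high protein breakfast ideas", k=3):
    print(doc.metadata["source"], doc.page_content[:80])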
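One caveat worth noting: ensure_pdf_downloaded buffers the whole response in memory and spots LFS pointers by file size alone. A streamed download with an explicit pointer check would be slightly more robust for a large PDF. The sketch below is an alternative under those assumptions, not what this commit implements; Git LFS pointer files begin with a fixed "version https://git-lfs.github.com/spec/v1" header.

# Alternative helper (sketch, not part of this commit): stream the download to
# disk and detect an LFS pointer by its header rather than by size.
import os
import requests

LFS_HEADER = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    # Pointer files are tiny text files that begin with the LFS spec header.
    with open(path, "rb") as f:
        return f.read(len(LFS_HEADER)) == LFS_HEADER

def download_pdf(local_path: str, url: str, chunk_size: int = 1 << 20):
    if os.path.exists(local_path) and not is_lfs_pointer(local_path):
        return  # the real PDF is already on disk
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    with open(local_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=chunk_size):
            f.write(chunk)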