Update llm.py
llm.py CHANGED

@@ -7,7 +7,8 @@ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
 # Initialize HF pipeline for text generation
 text_generator = pipeline(
     "text-generation",  # Task type
-    model="google/gemma-3n-
+    model="google/gemma-3n-E2B-it-litert-preview",
+    # model="google/gemma-3n-e4b-it",
     # model="Qwen/Qwen3-Embedding-0.6B",
     # device="cuda" if torch.cuda.is_available() else "cpu",
     device="cpu",
@@ -20,16 +21,9 @@ model = HuggingFacePipeline(pipeline=text_generator)
 
 def generate_sentences(topic, n=1):
     prompt = ChatPromptTemplate.from_template(
-        "
-        "
-        "
-        "- Use simple vocabulary\n"
-        "- Family-friendly content\n\n"
-        "### Output Format\n"
-        "Return ONLY the sentences, one per line with:\n"
-        "- No bullet points\n"
-        "- No numbering\n"
-        "- No extra text or explanations"
+        "You are a helpful assistant. Generate exactly {n} simple sentences about the topic: {topic}. "
+        "Each sentence must be in English and appropriate for all audiences. "
+        "Return each sentence on a new line without any numbering or bullets"
     )
     chain = prompt | model | StrOutputParser()
     response = chain.invoke({"topic": topic, "n": n})
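For reference, the hunks above stitch together into roughly the following file. This is a minimal sketch, not the exact committed source: only the langchain_community import is confirmed by the hunk headers, so the transformers and langchain_core imports, the max_new_tokens setting, and the return statement (the diff cuts off right after chain.invoke) are assumptions.

from transformers import pipeline
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

# Initialize HF pipeline for text generation (model name as committed above)
text_generator = pipeline(
    "text-generation",  # Task type
    model="google/gemma-3n-E2B-it-litert-preview",
    device="cpu",        # the CUDA branch is commented out in the commit
    max_new_tokens=256,  # assumed: generation kwargs are not shown in the diff
)

# Wrap the raw HF pipeline so it composes with LangChain runnables
model = HuggingFacePipeline(pipeline=text_generator)

def generate_sentences(topic, n=1):
    prompt = ChatPromptTemplate.from_template(
        "You are a helpful assistant. Generate exactly {n} simple sentences about the topic: {topic}. "
        "Each sentence must be in English and appropriate for all audiences. "
        "Return each sentence on a new line without any numbering or bullets"
    )
    # LCEL pipe: render the template, run the wrapped pipeline, coerce to str
    chain = prompt | model | StrOutputParser()
    response = chain.invoke({"topic": topic, "n": n})
    # Assumed post-processing: one sentence per line, per the prompt's contract
    return [line.strip() for line in response.splitlines() if line.strip()]

A quick check with hypothetical inputs: generate_sentences("the ocean", n=3) should come back as a list of three sentences, assuming the model follows the format, since StrOutputParser() returns the generated text as a plain string and the prompt asks for one sentence per line.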