Update app.py
app.py CHANGED
@@ -64,7 +64,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # model_id = "mistralai/Mistral-7B-v0.3"
 
-model_id = "CohereForAI/aya-23-
+model_id = "CohereForAI/aya-23-8B"
 
 
 tokenizer = AutoTokenizer.from_pretrained(
@@ -77,7 +77,7 @@ accelerator = Accelerator()
 
 model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 # torch_dtype= torch.uint8,
-
+torch_dtype=torch.float16,
 load_in_8bit=True,
 # torch_dtype=torch.fl,
 attn_implementation="flash_attention_2",
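For context, the model-loading code after this commit looks roughly like the sketch below. This is a minimal reconstruction, not the full app.py: the `token` assignment and `device_map="auto"` are assumptions, and `BitsAndBytesConfig` is used because newer transformers releases deprecate passing `load_in_8bit=True` directly to `from_pretrained`. With 8-bit quantization enabled, `torch_dtype=torch.float16` sets the dtype of the remaining non-quantized modules.

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "CohereForAI/aya-23-8B"
token = os.environ["HF_TOKEN"]  # assumption: the token is defined elsewhere in app.py

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)

# 8-bit quantization via bitsandbytes; newer transformers versions prefer
# BitsAndBytesConfig over the bare load_in_8bit=True kwarg used in the diff.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    torch_dtype=torch.float16,  # dtype for the non-quantized modules
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    attn_implementation="flash_attention_2",  # requires the flash-attn package
    device_map="auto",  # assumption: let accelerate place the layers
)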