Add demos for ChatGLM
- examples/ChatGLM2-Demo.py +10 -0
- examples/DownloadChatGLM.py +8 -0
examples/ChatGLM2-Demo.py
ADDED
@@ -0,0 +1,10 @@
+import os
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:32"  # reduce CUDA memory fragmentation; must be set before CUDA initializes
+
+from transformers import AutoTokenizer, AutoModel
+
+tokenizer = AutoTokenizer.from_pretrained("tunning/chatglm2-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("tunning/chatglm2-6b", trust_remote_code=True).cuda()
+model = model.eval()
+response, history = model.chat(tokenizer, "你好", history=[])  # "Hello"
+print(response)
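If the 6B model still does not fit in GPU memory with the allocator tweak above, the upstream THUDM/chatglm2-6b README loads a 4-bit quantized model instead. A minimal sketch against the public checkpoint, assuming the repo's remote code provides the quantize() helper (the fine-tuned tunning/chatglm2-6b path is not guaranteed to ship it):

import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:32"

from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
# quantize(4) packs the weights to 4 bits before moving them to the GPU,
# cutting memory use substantially compared with full-precision .cuda()
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()
model = model.eval()
response, history = model.chat(tokenizer, "你好", history=[])  # "Hello"
print(response)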
examples/DownloadChatGLM.py
ADDED
@@ -0,0 +1,8 @@
+from transformers import AutoTokenizer, AutoModel
+tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
+model = model.eval()
+response, history = model.chat(tokenizer, "你好", history=[])  # "Hello"
+print(response)
+response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history)  # "What should I do if I can't sleep at night?"
+print(response)
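model.chat() returns only the completed reply; the same remote modeling code also exposes a streaming interface used by THUDM's own demo scripts. A hedged sketch, assuming stream_chat yields cumulative (response, history) pairs as in those demos:

from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
model = model.eval()

history, printed = [], 0
for response, history in model.stream_chat(tokenizer, "你好", history=history):  # "Hello"
    print(response[printed:], end="", flush=True)  # each yield extends the previously printed text
    printed = len(response)
print()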