Text-to-Speech · Safetensors · GGUF · qwen2 · audio · speech · speech-language-models · conversational
harryjulian committed
Commit a827e13 · verified · 1 Parent(s): 6f93298

Upload folder using huggingface_hub
config.json CHANGED
@@ -4,11 +4,38 @@
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 41577,
+  "dtype": "bfloat16",
   "eos_token_id": 41579,
   "hidden_act": "silu",
   "hidden_size": 896,
   "initializer_range": 0.02,
   "intermediate_size": 4864,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
   "max_position_embeddings": 32768,
   "max_window_layers": 21,
   "model_type": "qwen2",
@@ -18,11 +45,10 @@
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 1000000.0,
-  "sliding_window": 32768,
+  "sliding_window": null,
   "tie_word_embeddings": true,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.3",
+  "transformers_version": "4.56.1",
   "use_cache": true,
   "use_sliding_window": false,
-  "vocab_size": 107567
+  "vocab_size": 107565
 }
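
The config changes track the serialization format of newer transformers releases: `torch_dtype` is now written as `dtype`, a per-layer `layer_types` list is emitted (all 24 layers use full attention here), and `sliding_window` becomes an explicit null, consistent with `use_sliding_window: false`. A minimal sketch of inspecting the updated fields; the path below is a placeholder for a local checkout of this repo (the repo id is not shown in the diff):

from transformers import AutoConfig

# Hypothetical path -- point at wherever this repo is checked out.
config = AutoConfig.from_pretrained("./path/to/checkout")

print(config.model_type)        # "qwen2"
print(config.vocab_size)        # 107565 after this commit
print(config.sliding_window)    # None -- sliding-window attention disabled
print(len(config.layer_types))  # 24 entries on recent transformers versions
print(set(config.layer_types))  # {"full_attention"}
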
generation_config.json CHANGED
@@ -10,5 +10,5 @@
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.51.3"
+  "transformers_version": "4.56.1"
 }
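
Only the recorded transformers_version changes here; the sampling parameters carry over untouched. A quick check, again against a hypothetical local checkout:

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("./path/to/checkout")
# Sampling defaults are unchanged by this commit.
print(gen.temperature, gen.top_k, gen.top_p)  # 0.7 20 0.8
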
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf437b16bc161b4bc2518e8803d6fb55cb49227da55709cc99f33612d9d1f343
-size 1101348824
+oid sha256:828456eaeba515fa82657fe88de39dc72b41d28e7cca7a5f50885087576749b2
+size 1101341656
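
The weights were re-serialized, so the Git LFS pointer's oid and size change; the small size drop is plausibly just the vocab_size shrinking from 107567 to 107565. A stdlib-only sketch for checking a downloaded object against the new pointer values:

import hashlib

def verify_lfs_object(path, expected_oid, expected_size, chunk=1 << 20):
    """Stream-hash a downloaded LFS object and compare with the pointer's oid/size."""
    h, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
            size += len(block)
    return h.hexdigest() == expected_oid and size == expected_size

print(verify_lfs_object(
    "model.safetensors",  # assumes the resolved file, not the pointer, is on disk
    "828456eaeba515fa82657fe88de39dc72b41d28e7cca7a5f50885087576749b2",
    1101341656,
))
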
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
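
The new special_tokens_map.json pins the BOS/EOS/pad strings, with padding reusing <|im_end|>, i.e. the pad token is shared with EOS. A sketch of confirming what the tokenizer picks up, assuming a local checkout at a placeholder path:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./path/to/checkout")
print(tok.bos_token)  # <|endoftext|>
print(tok.eos_token)  # <|im_end|>
print(tok.pad_token)  # <|im_end|> (shared with EOS)
print(tok.pad_token_id == tok.eos_token_id)  # True
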
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3734a82454510e6089a810075f6d5af72765e34399a52a9c35b3171596e00c3d
-size 16040578
+oid sha256:7e8d608914e242181a782b86f006e91ebd158bca3435808629bac8d210d53955
+size 18183921
tokenizer_config.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7c54417645c79117d3b98ba99a536453498537b240736e617f1c5bf94ac0ab0
-size 12002255
+oid sha256:d5219977fcb10a20ff6fa5e6472729bf6b7ddde64f391918789c1883f1077687
+size 12001720
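
tokenizer.json, tokenizer_config.json, and model.safetensors are all stored under Git LFS, so the diffs above show only three-line spec-v1 pointer files rather than file contents. A minimal parser for that pointer format, useful when scripting around a raw (non-LFS) clone:

def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs pointer: 'version <url>', 'oid sha256:<hex>', 'size <bytes>'."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo, "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:d5219977fcb10a20ff6fa5e6472729bf6b7ddde64f391918789c1883f1077687
size 12001720"""
print(parse_lfs_pointer(pointer))  # {'version': ..., 'algo': 'sha256', 'oid': 'd521...', 'size': 12001720}
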
vocab.json ADDED
The diff for this file is too large to render. See raw diff