Commit f73f47e (verified) by lakhera2023 · 1 parent: 0b9358d

Upload folder using huggingface_hub
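The commit message says the files were pushed with `huggingface_hub`'s folder upload. A minimal sketch of that call, assuming a local checkpoint directory (the path `./llama4-debugmodel-10k` is a hypothetical placeholder; `upload_folder` is the real API):

```python
from huggingface_hub import upload_folder

# Push every file in a local checkpoint directory to the Hub repo.
# "./llama4-debugmodel-10k" is a placeholder path for illustration.
upload_folder(
    repo_id="lakhera2023/llama4-debugmodel-10k",
    folder_path="./llama4-debugmodel-10k",
    commit_message="Upload folder using huggingface_hub",
)
```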
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,32 @@
+---
+license: llama3
+base_model: meta-llama/Llama-4-Scout-17B-16E
+tags:
+- llama4
+- moe
+- torchtitan
+- custom
+---
+
+# Llama 4 Debug Model - Trained with TorchTitan
+
+A custom-trained Llama 4 model built with the TorchTitan framework.
+
+## Model Details
+
+- **Training Framework**: TorchTitan
+- **Training Steps**: 10,000
+- **Model Size**: ~220 MB
+- **Precision**: bfloat16
+
+## Usage
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained("lakhera2023/llama4-debugmodel-10k")
+tokenizer = AutoTokenizer.from_pretrained("lakhera2023/llama4-debugmodel-10k")
+
+prompt = "Once upon a time"
+inputs = tokenizer(prompt, return_tensors="pt")
+outputs = model.generate(**inputs, max_new_tokens=100)
+print(tokenizer.decode(outputs[0]))
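Since the card lists bfloat16 precision, a hedged variant of that usage snippet loads the weights in the matching dtype (the `torch_dtype` argument is standard `transformers`; the repo id is taken from the card):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "lakhera2023/llama4-debugmodel-10k"

# Load weights in bfloat16 to match the card's stated training precision.
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(repo)

inputs = tokenizer("Once upon a time", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```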
model-00001-of-00001.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7a67862d9623ab17d348d4640edc8f8e42f35fa5aeaeeb7f8d2039056cfb638
+size 231303471
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c680f67931b83232a1d9dd880f15ff671173515fae2d3cd607bbc81bec894c7
+size 231302968
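These entries are Git LFS pointer files: the actual weights live in LFS storage, and the `oid` is the SHA-256 of the real payload. A small sketch for checking a downloaded shard against its pointer, assuming the file sits in the current directory (`hashlib` is the standard library; the expected digest is the `oid` above):

```python
import hashlib

# Compare a downloaded file's SHA-256 against the oid in its LFS pointer.
expected = "2c680f67931b83232a1d9dd880f15ff671173515fae2d3cd607bbc81bec894c7"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    # Hash in 1 MiB chunks so large shards don't need to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch: file is not the LFS payload"
```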
model.safetensors.index.json ADDED
@@ -0,0 +1,64 @@
+{
+  "metadata": {
+    "total_size": 231295488
+  },
+  "weight_map": {
+    "language_model.model.embed_tokens.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.experts.down_proj": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.router.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.experts.down_proj": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.router.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.experts.down_proj": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.router.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.shared_expert.gate_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.shared_expert.down_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.shared_expert.up_proj.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.norm.weight": "model-00001-of-00001.safetensors",
+    "language_model.lm_head.weight": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.1.feed_forward.experts.gate_up_proj": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.3.feed_forward.experts.gate_up_proj": "model-00001-of-00001.safetensors",
+    "language_model.model.layers.5.feed_forward.experts.gate_up_proj": "model-00001-of-00001.safetensors"
+  }
+}
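The `weight_map` tells a loader which shard file holds each tensor; in this checkpoint everything maps to the single shard. A minimal sketch of resolving and reading one tensor, assuming the files have been downloaded locally (`safe_open` is the real safetensors API):

```python
import json

from safetensors import safe_open

# Resolve which shard holds a tensor, then read just that tensor.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "language_model.model.embed_tokens.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00001.safetensors"

with safe_open(shard, framework="pt", device="cpu") as st:
    weight = st.get_tensor(name)

print(weight.shape, weight.dtype)
```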
sharded/shard-00001-model-00001-of-00001.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e704adfdbefd8340f462adeb1ff85063a7dad69cf32c58e67bb8649941cbfcd
+size 231308256
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|begin_of_text|>",
+  "eos_token": "<|eot|>",
+  "pad_token": "<|finetune_right_pad|>"
+}
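These mappings are what `AutoTokenizer` exposes as its special-token attributes. A quick sketch to confirm them after loading (repo id taken from the README above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("lakhera2023/llama4-debugmodel-10k")

# Should match special_tokens_map.json above.
print(tokenizer.bos_token)  # <|begin_of_text|>
print(tokenizer.eos_token)  # <|eot|>
print(tokenizer.pad_token)  # <|finetune_right_pad|>
```

Since a pad token is defined, passing `pad_token_id=tokenizer.pad_token_id` to `generate` avoids the usual missing-pad-token warning.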
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:172c9eb4beafc72601690da3ccfcede5c2e6806a8d5ec1fca33e22acea8023a4
+size 27948578
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0bdbaf59b0762c8c807617e2d8ea51420eb1b1de266df2495be755c8e0ed6ed
+size 3622230
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff