This tiny model is intended for debugging. It is randomly initialized, using the configuration adapted from [Qwen/Qwen3.6-35B-A3B](https://huggingface.co/Qwen/Qwen3.6-35B-A3B).
| File path | Size |
| --- | --- |
| `model.safetensors` | 12.3 MB |
Example usage:
```shell
# Multi-token prediction is supported
model_id=tiny-random/qwen3.6-moe
vllm serve $model_id \
  --tensor-parallel-size 2 \
  --speculative-config.method qwen3_next_mtp \
  --speculative-config.num_speculative_tokens 2 \
  --reasoning-parser qwen3 \
  --tool-call-parser qwen3_coder \
  --enable-auto-tool-choice \
  --max-cudagraph-capture-size 8
```
```shell
# Multi-token prediction is supported
model_id=tiny-random/qwen3.6-moe
python3 -m sglang.launch_server \
  --model-path $model_id \
  --tp-size 2 \
  --tool-call-parser qwen3_coder \
  --reasoning-parser qwen3 \
  --speculative-algo NEXTN \
  --speculative-num-steps 3 \
  --speculative-eagle-topk 1 \
  --speculative-num-draft-tokens 4
```
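Both launchers expose an OpenAI-compatible endpoint, so the served model can be smoke-tested with any OpenAI client. A minimal sketch, not part of this repo, assuming the default ports (vLLM on 8000; for SGLang, swap in 30000):

```python
# Minimal smoke test against the vLLM server above (assumed defaults).
# SGLang serves the same API on http://localhost:30000/v1 by default.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="tiny-random/qwen3.6-moe",
    messages=[{"role": "user", "content": "Say hi."}],
    max_tokens=16,
)
print(response.choices[0].message.content)
```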
```python
import torch
from transformers import AutoProcessor, Qwen3_5MoeForConditionalGeneration

model_id = "tiny-random/qwen3.6-moe"
model = Qwen3_5MoeForConditionalGeneration.from_pretrained(
    model_id, dtype=torch.bfloat16, device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=32)
# Decode only the newly generated tokens, skipping the prompt.
output_text = processor.decode(
    generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(output_text)
```
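The checkpoint is multimodal, but for quick debugging a text-only prompt goes through the same chat template. A minimal sketch, reusing the `model` and `processor` loaded above:

```python
# Text-only sketch reusing the model and processor from the example above.
messages = [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}]
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=16)
print(processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```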
Code to create this repo:
```python
import json
from copy import deepcopy
from pathlib import Path

import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    Qwen3_5MoeForConditionalGeneration,
    set_seed,
)

source_model_id = "Qwen/Qwen3.6-35B-A3B"
save_folder = "/tmp/tiny-random/qwen36-moe"

processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json['text_config'].update({
    'head_dim': 32,
    'hidden_size': 8,
    "layer_types": ['linear_attention'] * 3 + ['full_attention'],
    'intermediate_size': 32,
    'moe_intermediate_size': 32,
    'num_hidden_layers': 4,
    'num_attention_heads': 8,
    'num_key_value_heads': 4,
    'shared_expert_intermediate_size': 32,
    # 3.6 has higher dim for linear attention vs 3.5
})
config_json['text_config']['rope_parameters']['mrope_section'] = [1, 1, 2]
config_json["tie_word_embeddings"] = False
config_json['vision_config'].update(
    {
        'hidden_size': 64,
        'intermediate_size': 128,
        'num_heads': 2,
        'out_hidden_size': 8,
        'depth': 2,
    }
)
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

torch.set_default_dtype(torch.bfloat16)
model = Qwen3_5MoeForConditionalGeneration(config)
# with torch.no_grad():
#     for i in range(3):
#         attn = model.model.language_model.layers[i].linear_attn
#         attn.A_log = torch.nn.Parameter(attn.A_log.float())
#         attn.norm.float()
print(model.state_dict()['model.language_model.layers.0.linear_attn.A_log'].dtype)
print(model.state_dict()['model.language_model.layers.0.linear_attn.norm.weight'].dtype)
model.mtp = torch.nn.ModuleDict({
    "pre_fc_norm_embedding": torch.nn.RMSNorm(config.text_config.hidden_size),
    "fc": torch.nn.Linear(config.text_config.hidden_size * 2, config.text_config.hidden_size, bias=False),
    "layers": torch.nn.ModuleList([deepcopy(model.model.language_model.layers[3])]),
    "norm": torch.nn.RMSNorm(config.text_config.hidden_size),
    "pre_fc_norm_hidden": torch.nn.RMSNorm(config.text_config.hidden_size),
})
torch.set_default_dtype(torch.float32)

if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)

model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.2)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
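A quick sanity check after saving (an addition, not part of the original script; it assumes `save_folder` holds the output above): reload the checkpoint and run a tiny text-only forward pass.

```python
# Hypothetical round-trip check: reload the saved folder and verify shapes.
import torch
from transformers import Qwen3_5MoeForConditionalGeneration

reloaded = Qwen3_5MoeForConditionalGeneration.from_pretrained(save_folder, dtype=torch.bfloat16)
dummy_ids = torch.randint(0, reloaded.config.text_config.vocab_size, (1, 8))
with torch.no_grad():
    out = reloaded(input_ids=dummy_ids)
print(out.logits.shape)  # expected: (1, 8, vocab_size)
```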
Printing the model:
```text
Qwen3_5MoeForConditionalGeneration(
  (model): Qwen3_5MoeModel(
    (visual): Qwen3_5MoeVisionModel(
      (patch_embed): Qwen3_5MoeVisionPatchEmbed(
        (proj): Conv3d(3, 64, kernel_size=(2, 16, 16), stride=(2, 16, 16))
      )
      (pos_embed): Embedding(2304, 64)
      (rotary_pos_emb): Qwen3_5MoeVisionRotaryEmbedding()
      (blocks): ModuleList(
        (0-1): 2 x Qwen3_5MoeVisionBlock(
          (norm1): LayerNorm((64,), eps=1e-06, elementwise_affine=True)
          (norm2): LayerNorm((64,), eps=1e-06, elementwise_affine=True)
          (attn): Qwen3_5MoeVisionAttention(
            (qkv): Linear(in_features=64, out_features=192, bias=True)
            (proj): Linear(in_features=64, out_features=64, bias=True)
          )
          (mlp): Qwen3_5MoeVisionMLP(
            (linear_fc1): Linear(in_features=64, out_features=128, bias=True)
            (linear_fc2): Linear(in_features=128, out_features=64, bias=True)
            (act_fn): GELUTanh()
          )
        )
      )
      (merger): Qwen3_5MoeVisionPatchMerger(
        (norm): LayerNorm((64,), eps=1e-06, elementwise_affine=True)
        (linear_fc1): Linear(in_features=256, out_features=256, bias=True)
        (act_fn): GELU(approximate='none')
        (linear_fc2): Linear(in_features=256, out_features=8, bias=True)
      )
    )
    (language_model): Qwen3_5MoeTextModel(
      (embed_tokens): Embedding(248320, 8)
      (layers): ModuleList(
        (0-2): 3 x Qwen3_5MoeDecoderLayer(
          (linear_attn): Qwen3_5MoeGatedDeltaNet(
            (act): SiLUActivation()
            (conv1d): Conv1d(8192, 8192, kernel_size=(4,), stride=(1,), padding=(3,), groups=8192, bias=False)
            (norm): Qwen3_5MoeRMSNormGated()
            (out_proj): Linear(in_features=4096, out_features=8, bias=False)
            (in_proj_qkv): Linear(in_features=8, out_features=8192, bias=False)
            (in_proj_z): Linear(in_features=8, out_features=4096, bias=False)
            (in_proj_b): Linear(in_features=8, out_features=32, bias=False)
            (in_proj_a): Linear(in_features=8, out_features=32, bias=False)
          )
          (mlp): Qwen3_5MoeSparseMoeBlock(
            (gate): Qwen3_5MoeTopKRouter()
            (experts): Qwen3_5MoeExperts(
              (act_fn): SiLUActivation()
            )
            (shared_expert): Qwen3_5MoeMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLUActivation()
            )
            (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
          )
          (input_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
          (post_attention_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
        )
        (3): Qwen3_5MoeDecoderLayer(
          (self_attn): Qwen3_5MoeAttention(
            (q_proj): Linear(in_features=8, out_features=512, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
            (q_norm): Qwen3_5MoeRMSNorm((32,), eps=1e-06)
            (k_norm): Qwen3_5MoeRMSNorm((32,), eps=1e-06)
          )
          (mlp): Qwen3_5MoeSparseMoeBlock(
            (gate): Qwen3_5MoeTopKRouter()
            (experts): Qwen3_5MoeExperts(
              (act_fn): SiLUActivation()
            )
            (shared_expert): Qwen3_5MoeMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLUActivation()
            )
            (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
          )
          (input_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
          (post_attention_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
        )
      )
      (norm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
      (rotary_emb): Qwen3_5MoeTextRotaryEmbedding()
    )
  )
  (lm_head): Linear(in_features=8, out_features=248320, bias=False)
  (mtp): ModuleDict(
    (pre_fc_norm_embedding): RMSNorm((8,), eps=None, elementwise_affine=True)
    (fc): Linear(in_features=16, out_features=8, bias=False)
    (layers): ModuleList(
      (0): Qwen3_5MoeDecoderLayer(
        (self_attn): Qwen3_5MoeAttention(
          (q_proj): Linear(in_features=8, out_features=512, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
          (q_norm): Qwen3_5MoeRMSNorm((32,), eps=1e-06)
          (k_norm): Qwen3_5MoeRMSNorm((32,), eps=1e-06)
        )
        (mlp): Qwen3_5MoeSparseMoeBlock(
          (gate): Qwen3_5MoeTopKRouter()
          (experts): Qwen3_5MoeExperts(
            (act_fn): SiLUActivation()
          )
          (shared_expert): Qwen3_5MoeMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
          (shared_expert_gate): Linear(in_features=8, out_features=1, bias=False)
        )
        (input_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
        (post_attention_layernorm): Qwen3_5MoeRMSNorm((8,), eps=1e-06)
      )
    )
    (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
    (pre_fc_norm_hidden): RMSNorm((8,), eps=None, elementwise_affine=True)
  )
)
```
Test environment:

- torch: 2.11.0
- transformers: 5.5.0
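To confirm a local environment matches these versions, a one-liner:

```python
# Print the locally installed versions; expect 2.11.0 and 5.5.0 as listed above.
import torch, transformers
print(torch.__version__, transformers.__version__)
```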