---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- moonshotai/Kimi-Linear-48B-A3B-Instruct
---

This tiny model is intended for debugging. It is randomly initialized with a configuration adapted from [moonshotai/Kimi-Linear-48B-A3B-Instruct](https://huggingface.co/moonshotai/Kimi-Linear-48B-A3B-Instruct).

### Example usage:

- vLLM

```bash
vllm serve tiny-random/kimi-linear --trust-remote-code
```
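
Once the server is up, you can query it through vLLM's OpenAI-compatible API. A minimal sketch, assuming the default port 8000 and the `openai` Python client:

```python
# Minimal sketch: query the vLLM server started above via its
# OpenAI-compatible endpoint (assumes the default port 8000).
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
completion = client.chat.completions.create(
    model="tiny-random/kimi-linear",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=16,
)
print(completion.choices[0].message.content)
```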

- Transformers

```python
# tested on transformers==4.57.1
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/kimi-linear"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype=torch.bfloat16,
    device_map="cuda",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant provided by Moonshot-AI."},
    {"role": "user", "content": "Is 123 a prime?"},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
    tokenize=True,
).to(model.device)
print(input_ids)
generated_ids = model.generate(inputs=input_ids, max_new_tokens=500)
response = tokenizer.batch_decode(generated_ids)[0]
print(response)
```
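
Since the weights are random, the decoded text is gibberish; the snippet only verifies that generation runs end to end. To print just the generated continuation without the echoed prompt, slice off the prompt tokens first:

```python
# Decode only the newly generated tokens, dropping the prompt
# that apply_chat_template prepended above.
new_tokens = generated_ids[:, input_ids.shape[-1]:]
print(tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0])
```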

### Code to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "moonshotai/Kimi-Linear-48B-A3B-Instruct"
save_folder = "/tmp/tiny-random/kimi-linear"

# Save the tokenizer, pointing its auto_map entry back at the source repo so
# that remote tokenizer code still resolves after the local *.py files are removed.
Path(save_folder).mkdir(parents=True, exist_ok=True)
tokenizer = AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_folder)
with open(hf_hub_download(source_model_id, filename='tokenizer_config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    tokenizer_config_json = json.load(f)
tokenizer_config_json['auto_map']['AutoTokenizer'][0] = f'{source_model_id}--' + \
    tokenizer_config_json["auto_map"]["AutoTokenizer"][0]
with open(f"{save_folder}/tokenizer_config.json", "w", encoding='utf-8') as f:
    json.dump(tokenizer_config_json, f, indent=2)
# hf_hub_download(source_model_id, filename='tiktoken.model', repo_type='model',
#                 local_dir=save_folder, local_dir_use_symlinks=True, cache_dir='/tmp/')

# Shrink the source config: tiny hidden sizes, few heads, and only 5 layers
# (a mix of KDA linear-attention and full-attention layers); fields not
# overridden here are inherited from the source config.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
for k, v in config_json['auto_map'].items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'
config_json.update({
    "head_dim": 32,
    "hidden_size": 8,
    "intermediate_size": 32,
    "linear_attn_config": {
        "full_attn_layers": [4],
        "head_dim": 32,
        "kda_layers": [1, 2, 3],
        "num_heads": 8,
        "short_conv_kernel_size": 4,
    },
    "num_attention_heads": 8,
    "num_key_value_heads": 8,
    "moe_intermediate_size": 32,
    "num_hidden_layers": 5,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
# Build the model in bfloat16, then reinitialize every weight with a fixed seed.
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
set_seed(42)
model = model.cpu()
n_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, (p.numel() / n_params * 100), '%')
model.save_pretrained(save_folder)

# Rewrite auto_map to reference the source repo, then drop the copied *.py files.
with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
    config_json = json.load(f)
config_json['auto_map'] = {k: f'{source_model_id}--' + v.split(
    '--')[-1] for k, v in config_json['auto_map'].items()}
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
for python_file in Path(save_folder).glob('*.py'):
    python_file.unlink()
```
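
After the script finishes, a quick way to confirm the shrunken config took effect is to reload the checkpoint and count parameters. A sketch, assuming the script above has already written `/tmp/tiny-random/kimi-linear`:

```python
# Sanity-check sketch: reload the tiny checkpoint (remote code is pulled
# from the source repo via the rewritten auto_map) and report its size.
from transformers import AutoModelForCausalLM

tiny = AutoModelForCausalLM.from_pretrained(
    "/tmp/tiny-random/kimi-linear", trust_remote_code=True
)
print(f"{sum(p.numel() for p in tiny.parameters()):,} parameters")
```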

### Printing the model:

```text
KimiLinearForCausalLM(
  (model): KimiLinearModel(
    (embed_tokens): Embedding(163840, 8, padding_idx=163839)
    (layers): ModuleList(
      (0): KimiDecoderLayer(
        (self_attn): KimiDeltaAttention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=256, bias=False)
          (v_proj): Linear(in_features=8, out_features=256, bias=False)
          (q_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (k_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (v_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (f_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (f_b_proj): Linear(in_features=32, out_features=256, bias=False)
          (b_proj): Linear(in_features=8, out_features=8, bias=False)
          (g_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (g_b_proj): Linear(in_features=32, out_features=256, bias=False)
          (o_norm): FusedRMSNormGated(32, eps=1e-05, activation=sigmoid)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): KimiMLP(
          (gate_proj): Linear(in_features=8, out_features=32, bias=False)
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (input_layernorm): KimiRMSNorm()
        (post_attention_layernorm): KimiRMSNorm()
      )
      (1-2): 2 x KimiDecoderLayer(
        (self_attn): KimiDeltaAttention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=256, bias=False)
          (v_proj): Linear(in_features=8, out_features=256, bias=False)
          (q_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (k_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (v_conv1d): ShortConvolution(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256, bias=False, activation=silu, backend=triton)
          (f_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (f_b_proj): Linear(in_features=32, out_features=256, bias=False)
          (b_proj): Linear(in_features=8, out_features=8, bias=False)
          (g_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (g_b_proj): Linear(in_features=32, out_features=256, bias=False)
          (o_norm): FusedRMSNormGated(32, eps=1e-05, activation=sigmoid)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (block_sparse_moe): KimiSparseMoeBlock(
          (experts): ModuleList(
            (0-255): 256 x KimiBlockSparseMLP(
              (w1): Linear(in_features=8, out_features=32, bias=False)
              (w2): Linear(in_features=32, out_features=8, bias=False)
              (w3): Linear(in_features=8, out_features=32, bias=False)
              (act_fn): SiLUActivation()
            )
          )
          (gate): KimiMoEGate()
          (shared_experts): KimiMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): KimiRMSNorm()
        (post_attention_layernorm): KimiRMSNorm()
      )
      (3-4): 2 x KimiDecoderLayer(
        (self_attn): KimiMLAAttention(
          (q_proj): Linear(in_features=8, out_features=1536, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): KimiRMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=2048, bias=False)
          (o_proj): Linear(in_features=1024, out_features=8, bias=False)
        )
        (block_sparse_moe): KimiSparseMoeBlock(
          (experts): ModuleList(
            (0-255): 256 x KimiBlockSparseMLP(
              (w1): Linear(in_features=8, out_features=32, bias=False)
              (w2): Linear(in_features=32, out_features=8, bias=False)
              (w3): Linear(in_features=8, out_features=32, bias=False)
              (act_fn): SiLUActivation()
            )
          )
          (gate): KimiMoEGate()
          (shared_experts): KimiMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): KimiRMSNorm()
        (post_attention_layernorm): KimiRMSNorm()
      )
    )
    (norm): KimiRMSNorm()
  )
  (lm_head): Linear(in_features=8, out_features=163840, bias=False)
)
```