import math
import os

import torch
import torch.nn as nn
from accelerate import Accelerator
from tqdm import tqdm
from transformers import AutoTokenizer, LlamaForCausalLM

from src.model.super_tokenizer import SuperTokenizer

# Shorthand dtype names accepted by LM(dtype=...).
DTYPE_DICT = {
    "bf16": torch.bfloat16,
    "fp16": torch.float16,
    "fp32": torch.float32,
}


class LM(nn.Module):
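    """Wrapper around a (typically frozen) LlamaForCausalLM whose input
    embeddings are partially overwritten by dense "super token" embeddings
    produced by a trainable SuperTokenizer.

    Usage sketch (paths are illustrative, not shipped defaults):

        lm = LM(
            model_name_or_path="meta-llama/Llama-2-7b-chat-hf",
            super_tokenizer_name_or_path="path/to/super_tokenizer",
        )
    """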
    def __init__(
        self,
        model_name_or_path: str,
        super_tokenizer_name_or_path: str,
        cache_dir: str = None,
        super_tokenizer_num_hidden_layers: int = 6,
        use_flash_attention_2: bool = True,
        is_model_frozen: bool = True,
        dtype: str = "bf16",
        device_map=None,
        accelerator: Accelerator = None,
    ):
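        """Load the backbone LM, the optional SuperTokenizer, and the tokenizer.

        Passing super_tokenizer_name_or_path="no" disables the SuperTokenizer;
        `dtype` must be one of the DTYPE_DICT keys ("bf16", "fp16", "fp32").
        """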
        super().__init__()

        # Resolve the dtype shorthand ("bf16" / "fp16" / "fp32") to a torch dtype.
        if dtype not in DTYPE_DICT:
            raise ValueError(f"dtype must be one of {list(DTYPE_DICT)}")
        dtype = DTYPE_DICT[dtype]

        # Backbone causal LM; weights must already be available locally
        # (local_files_only=True). NOTE: use_flash_attention_2 is the legacy
        # switch; recent transformers releases prefer
        # attn_implementation="flash_attention_2".
        self.model = LlamaForCausalLM.from_pretrained(
            model_name_or_path,
            cache_dir=cache_dir,
            local_files_only=True,
            torch_dtype=dtype,
            use_flash_attention_2=use_flash_attention_2,
            device_map=device_map,
        )

        # Optional SuperTokenizer that produces the dense "super token"
        # embeddings; the literal string "no" disables it.
        self.super_tokenizer = None
        if super_tokenizer_name_or_path != "no":
            self.super_tokenizer = SuperTokenizer.from_pretrained(
                super_tokenizer_name_or_path,
                cache_dir=cache_dir,
                local_files_only=True,
                torch_dtype=dtype,
                device_map=device_map,
                num_hidden_layers=super_tokenizer_num_hidden_layers,
            )

        # The tokenizer is pinned to the Llama-2 chat tokenizer rather than
        # derived from model_name_or_path. Llama-2 ships without a pad token,
        # so EOS doubles as padding.
        self.tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Llama-2-7b-chat-hf",
            cache_dir=cache_dir,
            local_files_only=True,
            use_fast=False,
        )
        self.tokenizer.pad_token = self.tokenizer.eos_token

        # Optionally freeze the backbone so only the SuperTokenizer trains.
        self.is_model_frozen = is_model_frozen
        if self.is_model_frozen:
            self.freeze_model()

        # Without a device_map, place modules explicitly: on the Accelerator
        # device when one is provided, otherwise on CPU.
        self.accelerator = accelerator
        if device_map is None:
            if self.accelerator is not None:
                device = self.accelerator.device
            else:
                device = torch.device("cpu")

            self.model.to(device)
            if self.super_tokenizer is not None:
                self.super_tokenizer.to(device)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        super_input_ids=None,
        super_attention_mask=None,
        placeholder_indices=None,
        super_token_indices=None,
        labels=None,
    ):
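        """Causal-LM forward pass over embeddings in which the placeholder
        positions have been replaced by super-token embeddings.

        Returns the usual transformers CausalLMOutputWithPast (including
        `loss` when `labels` are provided).
        """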
        inputs_embeds = self.prepare_model_inputs_embeds(
            input_ids,
            super_input_ids,
            super_attention_mask,
            placeholder_indices,
            super_token_indices,
        )

        output = self.model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            labels=labels,
        )

        return output

    def prepare_model_inputs_embeds(
        self,
        input_ids=None,
        super_input_ids=None,
        super_attention_mask=None,
        placeholder_indices=None,
        super_token_indices=None,
    ):
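        """Embed `input_ids`, then scatter SuperTokenizer outputs into the
        positions listed in `placeholder_indices`.

        `placeholder_indices` holds one index list per batch row; the
        SuperTokenizer returns a flat batch of embeddings that is consumed
        row by row, in order.
        """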
        inputs_embeds = self.model.get_input_embeddings()(input_ids)

        if self.super_tokenizer is not None and len(super_token_indices) != 0:
            super_inputs_embeds = self.super_tokenizer(
                super_input_ids,
                super_attention_mask,
                super_token_indices,
            )

            # Overwrite each row's placeholder positions with its slice of
            # the flat super-token embedding batch.
            inputs_embeds = inputs_embeds.type_as(super_inputs_embeds)
            cur_idx = 0
            for i, idx_lst in enumerate(placeholder_indices):
                if len(idx_lst) == 0:
                    continue
                inputs_embeds[i][idx_lst] = super_inputs_embeds[cur_idx:cur_idx + len(idx_lst)]
                cur_idx += len(idx_lst)

        return inputs_embeds

    @torch.no_grad()
    def generate(self, dataloader, return_new_tokens_only=True, decode=True, **gen_kwargs):
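        """Autoregressively generate from every batch in `dataloader`.

        With return_new_tokens_only=True the prompt tokens are stripped from
        the output; with decode=True token ids are decoded to strings. Under
        an Accelerator, generations are padded and gathered across processes.
        """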
        self.eval()

        all_generations = []
        for inputs in tqdm(dataloader, desc="Generate"):
            inputs = self._move_to_device(inputs)
            input_ids = inputs["input_ids"]
            attention_mask = inputs["attention_mask"]

            inputs_embeds = self.prepare_model_inputs_embeds(
                input_ids=input_ids,
                super_input_ids=inputs["super_input_ids"],
                super_attention_mask=inputs["super_attention_mask"],
                placeholder_indices=inputs["placeholder_indices"],
                super_token_indices=inputs["super_token_indices"],
            )

            # input_ids is passed alongside inputs_embeds so the returned
            # sequences include the prompt, which the slicing below expects.
            outputs = self.model.generate(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                use_cache=True,
                **gen_kwargs,
            )

            if return_new_tokens_only:
                outputs = outputs[:, input_ids.shape[1]:]

            if self.accelerator is not None:
                # Pad to a common length, then gather generations from every
                # process so each rank ends up with the full set.
                outputs = outputs.contiguous()
                outputs = self.accelerator.pad_across_processes(
                    outputs, pad_index=self.tokenizer.pad_token_id, dim=1
                )
                outputs = self.accelerator.gather_for_metrics(outputs)

            outputs = outputs.tolist()
            if decode:
                outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
            all_generations.extend(outputs)

        return all_generations

    @torch.no_grad()
    def compute_perplexity(self, dataloader):
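        """Return corpus perplexity, i.e. exp(mean NLL) over all batches."""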
        self.eval()

        all_nlls = []
        for inputs in tqdm(dataloader, desc="Perplexity"):
            inputs = self._move_to_device(inputs)
            outputs = self.forward(**inputs)
            nll = outputs.loss

            if self.accelerator is not None:
                # Average the per-process losses so every rank accumulates
                # the same scalar.
                nll = self.accelerator.gather_for_metrics(nll).mean()
            all_nlls.append(nll.item())

        perplexity = math.exp(sum(all_nlls) / len(all_nlls))

        return perplexity
    def freeze_model(self):
        """Disable gradients for all backbone parameters."""
        self.is_model_frozen = True
        for param in self.model.parameters():
            param.requires_grad = False

    def _move_to_device(self, inputs):
        """Move every tensor in a batch dict to the model's device."""
        for k, v in inputs.items():
            if isinstance(v, torch.Tensor):
                inputs[k] = v.to(self.device)
        return inputs

    @property
    def device(self):
        """Target device for inputs: the Accelerator device when one is set,
        otherwise wherever the backbone's parameters actually live (covers
        device_map placement instead of assuming CPU)."""
        if self.accelerator is not None:
            return self.accelerator.device
        return next(self.model.parameters()).device
    def gradient_checkpointing_enable(self):
        self.model.gradient_checkpointing_enable()
        # The SuperTokenizer is optional; guard against None.
        if self.super_tokenizer is not None:
            self.super_tokenizer.gradient_checkpointing_enable()

    def save(self, output_dir, deepspeed=False):
        """Save the SuperTokenizer, and the backbone if it was fine-tuned,
        each alongside the tokenizer. The `deepspeed` flag is currently
        unused."""
        if self.super_tokenizer is not None:
            save_dir = os.path.join(output_dir, "super_tokenizer")
            self.super_tokenizer.save_pretrained(save_dir)
            self.tokenizer.save_pretrained(save_dir)

        if not self.is_model_frozen:
            save_dir = os.path.join(output_dir, "model")
            self.model.save_pretrained(save_dir)
            self.tokenizer.save_pretrained(save_dir)
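

if __name__ == "__main__":
    # Minimal smoke-test sketch. The model path and flags below are
    # illustrative assumptions, not part of this module's API; weights must
    # already be cached locally, since loading uses local_files_only=True.
    lm = LM(
        model_name_or_path="meta-llama/Llama-2-7b-chat-hf",
        super_tokenizer_name_or_path="no",
        use_flash_attention_2=False,
        dtype="fp32",
    )
    print(f"backbone frozen: {lm.is_model_frozen}, device: {lm.device}")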