import json
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
import sentencepiece as spm
import requests


# Load the trained Korean SentencePiece (unigram) tokenizer.
sp = spm.SentencePieceProcessor()
sp.load("ko_unigram.model")
|
|
# Special-token ids (assumes these pieces were added when training the
# SentencePiece model; <pad> falls back to id 0 if the piece is missing).
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0
start_id = sp.piece_to_id("<start>")
sep_id = sp.piece_to_id("<sep>")
end_id = sp.piece_to_id("<end>")
unk_id = sp.piece_to_id("<unk>")


vocab_size = sp.get_piece_size()
print(f"✅ Vocabulary size: {vocab_size}")
|
|
# Helpers for converting between raw text and token-id sequences.
def text_to_ids(text):
    return sp.encode(text, out_type=int)


def ids_to_text(ids):
    return sp.decode(ids)
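

# Quick round-trip demo of the helpers above (illustrative only; assumes the
# sample string below is covered by the ko_unigram vocabulary).
_sample = "안녕하세요"
print(_sample, "->", text_to_ids(_sample), "->", ids_to_text(text_to_ids(_sample)))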
|
|
max_len = 230   # context window; also the size of the positional-embedding table
batch_size = 128
|
|
class Lo(layers.Layer):
    """Small projection head: Dense -> GELU -> Dense, computed in float32."""
    def __init__(self, d_model):
        super().__init__()
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.p = layers.Dense(96, use_bias=True, dtype='float32')
        self._out_dtype = 'float32'

    def call(self, x):
        # Run both projections in float32 regardless of the input dtype.
        x_f32 = tf.cast(x, tf.float32)
        x = self.proj(x_f32)
        x = tf.nn.gelu(x)
        x = self.p(x)
        return tf.cast(x, self._out_dtype)
|
|
class LoSoU(layers.Layer):
    """
    Stabilized LoSoU layer (uses a dynamic alpha)
    - alpha is computed dynamically from the input: alpha = sigmoid(Linear(x))
    - uses an exponential moving average (EMA) over time instead of a cumulative
      sum (alpha: smoothing factor)
    - internal computation runs in float32 (improves stability under TPU bfloat16)
    - the EMA output is clipped and a small epsilon is applied
    - safe split handling (assumes an even dimension; otherwise the last
      dimension needs padding)
    """
    def __init__(self, d_model, clip_value=5.0, eps=1e-6):
        super().__init__()
        self.d_model = d_model
        self.clip_value = float(clip_value)
        self.eps = float(eps)

        # Gating projections into a 96-dim space, plus the output projection
        # back to d_model.
        self.Q = layers.Dense(96, dtype='float32')
        self.K = layers.Dense(96, dtype='float32')
        self.V = layers.Dense(96, dtype='float32')
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.norm = layers.LayerNormalization(epsilon=1e-5, dtype='float32')

        # Per-timestep smoothing factor in (0, 1); rescaled to [0.1, 0.9] in call().
        self.alpha_linear = layers.Dense(1, activation='sigmoid', dtype='float32')
|
|
    def _ema_over_time(self, score, alpha_dynamic):
        # score: (batch, time, feat); alpha_dynamic: (batch, time, 1).
        # tf.scan iterates over the leading axis, so move time to the front.
        seq = tf.transpose(score, perm=[1, 0, 2])
        alpha_seq = tf.transpose(alpha_dynamic, perm=[1, 0, 2])

        def step(prev_ema, inputs):
            x_t, alpha_t = inputs
            # EMA recurrence: new = alpha * x_t + (1 - alpha) * prev.
            return alpha_t * x_t + (1.0 - alpha_t) * prev_ema

        # Seed the EMA with the first timestep and scan over the rest.
        init = seq[0]
        ema_seq = tf.scan(fn=step, elems=(seq[1:], alpha_seq[1:]), initializer=init)
        ema_seq = tf.concat([tf.expand_dims(init, 0), ema_seq], axis=0)

        # Back to (batch, time, feat).
        return tf.transpose(ema_seq, perm=[1, 0, 2])
|
|
    def call(self, x):
        # All internal math runs in float32 for numerical stability.
        x_f32 = tf.cast(x, tf.float32)

        q = self.Q(x_f32)
        k = self.K(x_f32)
        v = self.V(x_f32)

        # Elementwise gates in place of softmax attention.
        g_q = tf.nn.sigmoid(q)
        g_k = tf.nn.tanh(k)
        score = g_q * g_k

        # Dynamic smoothing factor, rescaled from (0, 1) into [0.1, 0.9].
        alpha_dynamic = self.alpha_linear(x_f32) * 0.8 + 0.1

        # Smooth the gate scores along the time axis.
        score_ema = self._ema_over_time(score, alpha_dynamic)

        # Normalize by the per-position feature mean (clamped by eps), then clip.
        mean_last = tf.reduce_mean(score_ema, axis=-1, keepdims=True)
        denom = tf.maximum(mean_last, self.eps)
        score_norm = score_ema / denom
        score_clipped = tf.clip_by_value(score_norm, -self.clip_value, self.clip_value)

        # Modulate the values with the smoothed scores, then project and normalize.
        x_comb = score_clipped * v
        out = self.proj(x_comb)
        out = self.norm(out)

        return tf.cast(out, x.dtype)
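

# Minimal sanity check (not part of the original script): the tf.scan-based
# EMA above should match a plain Python loop on random inputs.
def _check_ema():
    layer = LoSoU(d_model=8)
    score = tf.random.normal((2, 5, 4))             # (batch, time, feat)
    alpha = tf.random.uniform((2, 5, 1), 0.1, 0.9)  # per-timestep smoothing
    ema_tf = layer._ema_over_time(score, alpha).numpy()
    s, a = score.numpy(), alpha.numpy()
    ref = np.empty_like(s)
    ref[:, 0] = s[:, 0]  # the EMA is seeded with the first timestep
    for t in range(1, s.shape[1]):
        ref[:, t] = a[:, t] * s[:, t] + (1.0 - a[:, t]) * ref[:, t - 1]
    assert np.allclose(ema_tf, ref, atol=1e-5)


_check_ema()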
|
|
class Block(layers.Layer):
    """A stack of `hyper_n` LoSoU layers applied sequentially."""
    def __init__(self, d_model, hyper_n):
        super().__init__()
        self.losou = [LoSoU(d_model) for _ in range(hyper_n)]

    def call(self, x):
        for losou in self.losou:
            x = losou(x)
        return x
|
|
class ReLaM(tf.keras.Model):
    def __init__(self, vocab_size, max_seq_len, d_model, n_layers, dropout_rate=0.1):
        # dropout_rate is kept for signature compatibility; no dropout is applied.
        super().__init__()
        # The embedding width is fixed at 128 to match the checkpoint below;
        # d_model only sets the width inside the LoSoU blocks.
        self.token_embedding = layers.Embedding(vocab_size, 128)
        self.pos_embedding = layers.Embedding(max_seq_len, 128)
        self.blocks = [Block(d_model, hyper_n=1) for _ in range(n_layers)]
        self.proj = layers.Dense(128)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype="float32")

    def call(self, x, training=False):
        seq_len = tf.shape(x)[1]
        positions = tf.range(seq_len)[tf.newaxis, :]
        x = self.token_embedding(x) + self.pos_embedding(positions)
        for block in self.blocks:
            x = block(x)
        x = self.proj(x)
        x = self.ln_f(x)
        # Weight tying: reuse the token-embedding matrix as the output projection.
        embedding_matrix = tf.cast(self.token_embedding.embeddings, x.dtype)
        logits = tf.matmul(x, embedding_matrix, transpose_b=True)
        return tf.cast(logits, tf.float32)
|
|
# Hyperparameters must match those used to produce Cobra.weights.h5.
model = ReLaM(
    vocab_size=vocab_size,
    max_seq_len=max_len,
    d_model=256,
    n_layers=1
)
|
|
# Build the variables with a dummy forward pass, then restore the weights
# (the path assumes a Colab environment).
dummy_input = tf.zeros((1, max_len), dtype=tf.int32)
_ = model(dummy_input)
model.load_weights('/content/Cobra.weights.h5')
print("Model weights loaded!")
|
|
def generate_text_topp(model, prompt, max_len=100, max_gen=98, p=0.9, temperature=0.8, min_len=30):
    """Autoregressive generation with top-p (nucleus) sampling."""
    model_input = text_to_ids(f"<start> {prompt} <sep>")
    model_input = model_input[:max_len]
    generated = list(model_input)
    for _ in range(max_gen):
        # Keep only the most recent max_len tokens as context.
        input_seq = generated[-max_len:] if len(generated) > max_len else generated
        input_padded = np.pad(input_seq, (0, max_len - len(input_seq)), constant_values=pad_id)
        input_tensor = tf.convert_to_tensor([input_padded])
        logits = model(input_tensor, training=False)
        next_token_logits = logits[0, len(input_seq) - 1].numpy()
        # Discourage (but do not forbid) <end>, and never sample <pad>.
        next_token_logits[end_id] -= 5.0
        next_token_logits[pad_id] -= 10.0
        probs = tf.nn.softmax(next_token_logits / temperature).numpy()
        # Nucleus sampling: keep the smallest set of tokens whose cumulative
        # probability reaches p, renormalize, and sample from that set.
        sorted_indices = np.argsort(probs)[::-1]
        sorted_probs = probs[sorted_indices]
        cumulative_probs = np.cumsum(sorted_probs)
        cutoff = np.searchsorted(cumulative_probs, p)
        top_indices = sorted_indices[:cutoff + 1]
        top_probs = sorted_probs[:cutoff + 1]
        top_probs /= np.sum(top_probs)
        next_token_id = np.random.choice(top_indices, p=top_probs)
        if next_token_id == end_id:
            if len(generated) >= min_len:
                break
            continue  # sequence still too short: resample instead of emitting <end>
        generated.append(int(next_token_id))
    return ids_to_text(generated)
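

# Toy illustration (not from the original script) of the nucleus cutoff logic
# above: with sorted probs [0.4, 0.3, 0.2, 0.1] and p = 0.75, the cumulative
# sums are [0.4, 0.7, 0.9, 1.0]; searchsorted returns 2, so cutoff + 1 = 3
# tokens form the nucleus.
assert np.searchsorted(np.cumsum([0.4, 0.3, 0.2, 0.1]), 0.75) == 2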
|
|
| print("\n\n===== ์์ฑ ๊ฒฐ๊ณผ =====") |
| print(generate_text_topp(model, "์ ๊ฐ ์ด๋ฐ๊ฐ ๋ฒ์ค๋ฅผ ํ์ผ ํด์ ์ค๋น ์ข ํด์ผ๊ฒ ์ด์. ์ฌ๋ฏธ์๋ ๋ํ์์ต๋๋ค!", p=0.8)) |