Upload folder using huggingface_hub
Browse files- .gitattributes +0 -24
- README.md +12 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log +236 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log +7 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log +9 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log +11 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log +4 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log +169 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log +312 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log +225 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log +7 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log +0 -0
- logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log +0 -0
- logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log +0 -0
.gitattributes
CHANGED
|
@@ -8,8 +8,6 @@
|
|
| 8 |
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 13 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
|
@@ -35,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 35 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
-
# Audio files - uncompressed
|
| 39 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 40 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 41 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 42 |
-
# Audio files - compressed
|
| 43 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 47 |
-
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 48 |
-
# Image files - uncompressed
|
| 49 |
-
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 50 |
-
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 51 |
-
*.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
-
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 53 |
-
# Image files - compressed
|
| 54 |
-
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
-
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
-
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
-
# Video files - compressed
|
| 58 |
-
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
-
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 8 |
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
| 11 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Mft Log
|
| 3 |
+
emoji: 📚
|
| 4 |
+
colorFrom: pink
|
| 5 |
+
colorTo: pink
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 5.49.1
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
|
| 2 |
+
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
|
| 3 |
+
Timestamp: 2025-10-10 05:57:57
|
| 4 |
+
=====================================
|
| 5 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 6 |
+
import pynvml # type: ignore[import]
|
| 7 |
+
The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.
|
| 8 |
+
|
| 9 |
+
[2025-10-10 05:58:00,052] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 10 |
+
[2025-10-10 05:58:03,277] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
|
| 11 |
+
[2025-10-10 05:58:03,278] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.3 --temperature_mlp_text 0.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.3 --temperature_mlp_vision 0.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
|
| 12 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 13 |
+
import pynvml # type: ignore[import]
|
| 14 |
+
[2025-10-10 05:58:05,879] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 15 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
|
| 16 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
|
| 17 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
|
| 18 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
|
| 19 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
|
| 20 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:163:main] dist_world_size=8
|
| 21 |
+
[2025-10-10 05:58:06,963] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 22 |
+
[2025-10-10 05:58:06,966] [INFO] [launch.py:253:main] process 548567 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 23 |
+
[2025-10-10 05:58:06,968] [INFO] [launch.py:253:main] process 548568 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 24 |
+
[2025-10-10 05:58:06,970] [INFO] [launch.py:253:main] process 548569 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 25 |
+
[2025-10-10 05:58:06,972] [INFO] [launch.py:253:main] process 548570 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 26 |
+
[2025-10-10 05:58:06,973] [INFO] [launch.py:253:main] process 548571 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 27 |
+
[2025-10-10 05:58:06,975] [INFO] [launch.py:253:main] process 548572 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 28 |
+
[2025-10-10 05:58:06,977] [INFO] [launch.py:253:main] process 548573 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 29 |
+
[2025-10-10 05:58:06,979] [INFO] [launch.py:253:main] process 548574 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 30 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 31 |
+
import pynvml # type: ignore[import]
|
| 32 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 33 |
+
import pynvml # type: ignore[import]
|
| 34 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 35 |
+
import pynvml # type: ignore[import]
|
| 36 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 37 |
+
import pynvml # type: ignore[import]
|
| 38 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 39 |
+
import pynvml # type: ignore[import]
|
| 40 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 41 |
+
import pynvml # type: ignore[import]
|
| 42 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 43 |
+
import pynvml # type: ignore[import]
|
| 44 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 45 |
+
import pynvml # type: ignore[import]
|
| 46 |
+
[2025-10-10 05:58:13,755] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 47 |
+
[2025-10-10 05:58:13,756] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 48 |
+
[2025-10-10 05:58:13,756] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 49 |
+
[2025-10-10 05:58:13,763] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 50 |
+
[2025-10-10 05:58:13,775] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 51 |
+
[2025-10-10 05:58:13,775] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 52 |
+
[2025-10-10 05:58:13,804] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 53 |
+
[2025-10-10 05:58:13,838] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 54 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 55 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 56 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 57 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 58 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 59 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 60 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 61 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
|
| 62 |
+
[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 63 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 64 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 65 |
+
warnings.warn(
|
| 66 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 67 |
+
{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.3, 'temperature_mlp': 0.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
|
| 68 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 69 |
+
warnings.warn(
|
| 70 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 71 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 72 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 73 |
+
warnings.warn(
|
| 74 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 75 |
+
warnings.warn(
|
| 76 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 77 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 78 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 79 |
+
warnings.warn(
|
| 80 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 81 |
+
warnings.warn(
|
| 82 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 83 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 84 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 85 |
+
warnings.warn(
|
| 86 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 87 |
+
warnings.warn(
|
| 88 |
+
TinyLlavaConfig {
|
| 89 |
+
"backward_type_connector": "normal",
|
| 90 |
+
"cache_dir": null,
|
| 91 |
+
"connector_type": "mlp2x_gelu",
|
| 92 |
+
"hidden_size": 896,
|
| 93 |
+
"ignore_index": -100,
|
| 94 |
+
"image_aspect_ratio": "square",
|
| 95 |
+
"image_token_index": -200,
|
| 96 |
+
"llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 97 |
+
"mask_model": [
|
| 98 |
+
"llm",
|
| 99 |
+
"connector"
|
| 100 |
+
],
|
| 101 |
+
"mask_type_connector": "soft",
|
| 102 |
+
"model_type": "tinyllava",
|
| 103 |
+
"num_queries": 128,
|
| 104 |
+
"num_resampler_layers": 3,
|
| 105 |
+
"pad_token": null,
|
| 106 |
+
"resampler_hidden_size": 768,
|
| 107 |
+
"sparsity_connector": null,
|
| 108 |
+
"subnet_type_connector": "global",
|
| 109 |
+
"temperature_connector": 0.3,
|
| 110 |
+
"text_config": {
|
| 111 |
+
"_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 112 |
+
"architectures": [
|
| 113 |
+
"Qwen2ForCausalLM"
|
| 114 |
+
],
|
| 115 |
+
"backward_type": "normal",
|
| 116 |
+
"bos_token_id": 151643,
|
| 117 |
+
"eos_token_id": 151643,
|
| 118 |
+
"hidden_size": 896,
|
| 119 |
+
"intermediate_size": 4864,
|
| 120 |
+
"mask_type": "soft",
|
| 121 |
+
"masked_layers": "all",
|
| 122 |
+
"max_position_embeddings": 32768,
|
| 123 |
+
"max_window_layers": 24,
|
| 124 |
+
"model_type": "qwen2",
|
| 125 |
+
"num_attention_heads": 14,
|
| 126 |
+
"num_hidden_layers": 24,
|
| 127 |
+
"num_key_value_heads": 2,
|
| 128 |
+
"rope_theta": 1000000.0,
|
| 129 |
+
"sliding_window": 32768,
|
| 130 |
+
"subnet_mode": "both",
|
| 131 |
+
"subnet_type": "None",
|
| 132 |
+
"temperature_attn": 0.3,
|
| 133 |
+
"temperature_mlp": 0.3,
|
| 134 |
+
"tie_word_embeddings": true,
|
| 135 |
+
"torch_dtype": "bfloat16",
|
| 136 |
+
"use_mrope": false,
|
| 137 |
+
"use_sliding_window": false,
|
| 138 |
+
"vocab_size": 151936
|
| 139 |
+
},
|
| 140 |
+
"threshold_connector": null,
|
| 141 |
+
"tokenizer_model_max_length": 2048,
|
| 142 |
+
"tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 143 |
+
"tokenizer_padding_side": "right",
|
| 144 |
+
"tokenizer_use_fast": false,
|
| 145 |
+
"transformers_version": "4.40.1",
|
| 146 |
+
"tune_type_connector": "frozen",
|
| 147 |
+
"tune_type_llm": "frozen",
|
| 148 |
+
"tune_type_vision_tower": "frozen",
|
| 149 |
+
"tune_vision_tower_from_layer": -1,
|
| 150 |
+
"use_cache": false,
|
| 151 |
+
"vision_config": {
|
| 152 |
+
"hidden_act": "gelu_pytorch_tanh",
|
| 153 |
+
"hidden_size": 1152,
|
| 154 |
+
"image_size": 384,
|
| 155 |
+
"intermediate_size": 4304,
|
| 156 |
+
"layer_norm_eps": 1e-06,
|
| 157 |
+
"model_name_or_path": "google/siglip-so400m-patch14-384",
|
| 158 |
+
"model_name_or_path2": "",
|
| 159 |
+
"model_type": "siglip_vision_model",
|
| 160 |
+
"num_attention_heads": 16,
|
| 161 |
+
"num_hidden_layers": 27,
|
| 162 |
+
"patch_size": 14
|
| 163 |
+
},
|
| 164 |
+
"vision_feature_layer": -2,
|
| 165 |
+
"vision_feature_select_strategy": "patch",
|
| 166 |
+
"vision_hidden_size": 1152,
|
| 167 |
+
"vision_model_name_or_path": "google/siglip-so400m-patch14-384",
|
| 168 |
+
"vision_model_name_or_path2": "",
|
| 169 |
+
"vocab_size": 151936
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 173 |
+
Traceback (most recent call last):
|
| 174 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 175 |
+
resolved_file = hf_hub_download(
|
| 176 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 177 |
+
validate_repo_id(arg_value)
|
| 178 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 179 |
+
raise HFValidationError(
|
| 180 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 181 |
+
|
| 182 |
+
The above exception was the direct cause of the following exception:
|
| 183 |
+
|
| 184 |
+
Traceback (most recent call last):
|
| 185 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 186 |
+
train()
|
| 187 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 188 |
+
model = training_recipe.load(model, model_args)
|
| 189 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 190 |
+
model.load_llm(**model_args['llm'])
|
| 191 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 192 |
+
self.language_model = self.language_model.from_pretrained(
|
| 193 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 194 |
+
resolved_config_file = cached_file(
|
| 195 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 196 |
+
raise EnvironmentError(
|
| 197 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 198 |
+
[2025-10-10 05:58:46,032] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548567
|
| 199 |
+
[2025-10-10 05:58:46,418] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548568
|
| 200 |
+
[2025-10-10 05:58:46,881] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548569
|
| 201 |
+
[2025-10-10 05:58:47,265] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548570
|
| 202 |
+
[2025-10-10 05:58:47,683] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548571
|
| 203 |
+
[2025-10-10 05:58:48,100] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548572
|
| 204 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 205 |
+
Traceback (most recent call last):
|
| 206 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 207 |
+
resolved_file = hf_hub_download(
|
| 208 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 209 |
+
validate_repo_id(arg_value)
|
| 210 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 211 |
+
raise HFValidationError(
|
| 212 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 213 |
+
|
| 214 |
+
The above exception was the direct cause of the following exception:
|
| 215 |
+
|
| 216 |
+
Traceback (most recent call last):
|
| 217 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 218 |
+
train()
|
| 219 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 220 |
+
model = training_recipe.load(model, model_args)
|
| 221 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 222 |
+
model.load_llm(**model_args['llm'])
|
| 223 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 224 |
+
self.language_model = self.language_model.from_pretrained(
|
| 225 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 226 |
+
[2025-10-10 05:58:48,477] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548573
|
| 227 |
+
resolved_config_file = cached_file(
|
| 228 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 229 |
+
raise EnvironmentError(
|
| 230 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 231 |
+
[2025-10-10 05:58:48,893] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548574
|
| 232 |
+
[2025-10-10 05:58:48,894] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1
==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
Timestamp: 2025-10-10 05:58:50
=====================================
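The OSError above points at '/nfs/ywang29/tinyLLaVA/checkpoints/...' (lower-case tinyLLaVA), while the reruns recorded later in this commit pass '/nfs/ywang29/TinyLLaVA/checkpoints/...', so the failure looks like a case-sensitive path mismatch rather than a missing pretrain checkpoint. A minimal pre-flight check along the following lines could catch this before the DeepSpeed launch; it is a sketch for illustration only and is not part of the logged training scripts.

# Hypothetical pre-flight check (not in the TinyLLaVA repo): verify that the
# pretrained checkpoint directory and its language_model subfolder exist before
# launching, so a case mismatch in the path fails fast with a clear message.
import os
import sys

PRETRAIN_DIR = "/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain"

for sub in ("", "language_model"):
    path = os.path.join(PRETRAIN_DIR, sub)
    if not os.path.isdir(path):
        sys.exit(f"Missing checkpoint directory: {path}")
print("Checkpoint layout looks OK:", PRETRAIN_DIR)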

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log ADDED
@@ -0,0 +1,7 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log
Timestamp: 2025-10-10 16:41:58
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 16:42:00,884] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log ADDED
@@ -0,0 +1,9 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log
Timestamp: 2025-10-10 05:58:50
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 05:58:53,031] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:55,712] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
[2025-10-10 05:58:55,713] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
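The --world_info value in the launcher command above is base64-encoded JSON describing which local GPU ranks run on each host. Decoding it (a small illustrative snippet, not part of the logged scripts) shows the single-node, 8-GPU layout used throughout these runs.

# Decode the --world_info argument passed to deepspeed.launcher.launch
# (base64-encoded JSON mapping host -> local GPU ranks).
import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}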

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log ADDED
@@ -0,0 +1,11 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log
Timestamp: 2025-10-10 06:01:16
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 06:01:19,451] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 06:01:22,190] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
[2025-10-10 06:01:22,192] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.7 --temperature_mlp_text 0.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.7 --temperature_mlp_vision 0.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log ADDED
@@ -0,0 +1,4 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log
Timestamp: 2025-10-10 06:07:52
=====================================
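The log names in this batch differ only in the temperature setting (0.3 through 1.9, with init_mean 1.0 and learning rate 2e-1), so the runs were presumably generated by a sweep over that one knob. A rough reconstruction of such a driver is sketched below; the actual launcher script is not part of this commit, and the invocation shown omits most of the fixed arguments visible in the logged DeepSpeed commands.

# Hypothetical sweep driver mirroring the naming pattern of these ablation logs.
# Only the swept flags are shown; the remaining fixed train.py arguments from the
# logged commands would need to be appended for a real run.
import subprocess
from datetime import datetime

TEMPS = ["0.3", "0.5", "0.7", "0.9", "1.1", "1.3", "1.5", "1.7", "1.9"]

for temp in TEMPS:
    tag = (f"qwen2.5-0_5b_base_masktune_42_llm-connector_"
           f"text-1.0_{temp}_2e-1_connector-1.0_{temp}_2e-1_ablation")
    log_path = f"logs_oct10/{tag}_{datetime.now():%Y%m%d_%H%M%S}.log"
    cmd = [
        "deepspeed", "tinyllava/train/train.py",
        "--deepspeed", "./scripts/zero3.json",
        "--init_mean_text", "1.0", "--init_mean_connector", "1.0",
        "--temperature_attn_text", temp, "--temperature_mlp_text", temp,
        "--temperature_connector", temp,
        "--learning_rate", "2e-1", "--mm_projector_lr", "2e-1",
        "--mask_model", "llm-connector", "--seed", "42",
    ]
    with open(log_path, "w") as log:
        subprocess.run(cmd, stdout=log, stderr=subprocess.STDOUT, check=False)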

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log ADDED
@@ -0,0 +1,169 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log
Timestamp: 2025-10-10 16:21:02
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 16:21:04,987] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:08,361] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
[2025-10-10 16:21:08,363] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.9 --temperature_mlp_text 1.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.9 --temperature_mlp_vision 1.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 16:21:10,949] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:12,019] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
[2025-10-10 16:21:12,019] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
[2025-10-10 16:21:12,020] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
[2025-10-10 16:21:12,020] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
[2025-10-10 16:21:12,020] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
[2025-10-10 16:21:12,020] [INFO] [launch.py:163:main] dist_world_size=8
[2025-10-10 16:21:12,020] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
[2025-10-10 16:21:12,022] [INFO] [launch.py:253:main] process 780015 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,024] [INFO] [launch.py:253:main] process 780016 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,026] [INFO] [launch.py:253:main] process 780017 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,028] [INFO] [launch.py:253:main] process 780018 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,030] [INFO] [launch.py:253:main] process 780019 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,032] [INFO] [launch.py:253:main] process 780020 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,034] [INFO] [launch.py:253:main] process 780021 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 16:21:12,036] [INFO] [launch.py:253:main] process 780022 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 16:21:18,873] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,003] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,097] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,190] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,217] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,225] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,227] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,230] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 16:21:19,397] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,406] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,494] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,589] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,589] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
[2025-10-10 16:21:19,619] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,626] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,632] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 16:21:19,633] [INFO] [comm.py:637:init_distributed] cdb=None
Apply masks for the following modules: ['llm', 'connector']
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.9, 'temperature_mlp': 1.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
  warnings.warn(
TinyLlavaConfig {
  "backward_type_connector": "normal",
  "cache_dir": null,
  "connector_type": "mlp2x_gelu",
  "hidden_size": 896,
  "ignore_index": -100,
  "image_aspect_ratio": "square",
  "image_token_index": -200,
  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
  "mask_model": [
    "llm",
    "connector"
  ],
  "mask_type_connector": "soft",
  "model_type": "tinyllava",
  "num_queries": 128,
  "num_resampler_layers": 3,
  "pad_token": null,
  "resampler_hidden_size": 768,
  "sparsity_connector": null,
  "subnet_type_connector": "global",
  "temperature_connector": 1.9,
  "text_config": {
    "_name_or_path": "Qwen/Qwen2.5-0.5B",
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "backward_type": "normal",
    "bos_token_id": 151643,
    "eos_token_id": 151643,
    "hidden_size": 896,
    "intermediate_size": 4864,
    "mask_type": "soft",
    "masked_layers": "all",
    "max_position_embeddings": 32768,
    "max_window_layers": 24,
    "model_type": "qwen2",
    "num_attention_heads": 14,
    "num_hidden_layers": 24,
    "num_key_value_heads": 2,
    "rope_theta": 1000000.0,
    "sliding_window": 32768,
    "subnet_mode": "both",
    "subnet_type": "None",
    "temperature_attn": 1.9,
    "temperature_mlp": 1.9,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_mrope": false,
    "use_sliding_window": false,
    "vocab_size": 151936
  },
  "threshold_connector": null,
  "tokenizer_model_max_length": 2048,
  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
  "tokenizer_padding_side": "right",
  "tokenizer_use_fast": false,
  "transformers_version": "4.40.1",
  "tune_type_connector": "frozen",
  "tune_type_llm": "frozen",
  "tune_type_vision_tower": "frozen",
  "tune_vision_tower_from_layer": -1,
  "use_cache": false,
  "vision_config": {
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 384,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_name_or_path": "google/siglip-so400m-patch14-384",
    "model_name_or_path2": "",
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 27,
    "patch_size": 14
  },
  "vision_feature_layer": -2,
  "vision_feature_select_strategy": "patch",
  "vision_hidden_size": 1152,
  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
  "vision_model_name_or_path2": "",
  "vocab_size": 151936
}
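The config above records a "soft" mask_type with temperature 1.9 for both the LLM and the connector. The snippet below is one plausible reading of what a temperature-scaled soft mask does to a weight matrix; it is an illustration only, not the repository's actual masking code, which may differ in detail.

# Assumed soft-mask module: a learnable score per weight is passed through a
# temperature-scaled sigmoid and multiplied into the weight. Lower temperatures
# give sharper, more binary masks; 1.9 (as in this run) keeps them soft.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    def __init__(self, in_features, out_features, temperature=1.9, init_mean=1.0):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        # One score per weight, initialized to init_mean (1.0 in this ablation grid).
        self.scores = nn.Parameter(torch.full_like(self.linear.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.linear.weight * mask, self.linear.bias)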

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log ADDED
The diff for this file is too large to render. See raw diff.

logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log ADDED
@@ -0,0 +1,312 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log
Timestamp: 2025-10-10 05:57:02
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 05:57:04,790] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:57:07,488] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
[2025-10-10 05:57:07,490] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
  import pynvml # type: ignore[import]
[2025-10-10 05:57:10,112] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:57:11,166] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
[2025-10-10 05:57:11,167] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
[2025-10-10 05:57:11,167] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
[2025-10-10 05:57:11,167] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
[2025-10-10 05:57:11,167] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
[2025-10-10 05:57:11,167] [INFO] [launch.py:163:main] dist_world_size=8
[2025-10-10 05:57:11,167] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
[2025-10-10 05:57:11,169] [INFO] [launch.py:253:main] process 1775550 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 21 |
+
[2025-10-10 05:57:11,171] [INFO] [launch.py:253:main] process 1775551 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 22 |
+
[2025-10-10 05:57:11,173] [INFO] [launch.py:253:main] process 1775552 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 23 |
+
[2025-10-10 05:57:11,175] [INFO] [launch.py:253:main] process 1775553 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 24 |
+
[2025-10-10 05:57:11,177] [INFO] [launch.py:253:main] process 1775554 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 25 |
+
[2025-10-10 05:57:11,179] [INFO] [launch.py:253:main] process 1775555 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 26 |
+
[2025-10-10 05:57:11,181] [INFO] [launch.py:253:main] process 1775556 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 27 |
+
[2025-10-10 05:57:11,183] [INFO] [launch.py:253:main] process 1775557 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
|
| 28 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 29 |
+
import pynvml # type: ignore[import]
|
| 30 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 31 |
+
import pynvml # type: ignore[import]
|
| 32 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 33 |
+
import pynvml # type: ignore[import]
|
| 34 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 35 |
+
import pynvml # type: ignore[import]
|
| 36 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 37 |
+
import pynvml # type: ignore[import]
|
| 38 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 39 |
+
import pynvml # type: ignore[import]
|
| 40 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 41 |
+
import pynvml # type: ignore[import]
|
| 42 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 43 |
+
import pynvml # type: ignore[import]
|
| 44 |
+
[2025-10-10 05:57:17,634] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 45 |
+
[2025-10-10 05:57:17,967] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 46 |
+
[2025-10-10 05:57:18,000] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 47 |
+
[2025-10-10 05:57:18,022] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 48 |
+
[2025-10-10 05:57:18,022] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 49 |
+
[2025-10-10 05:57:18,023] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 50 |
+
[2025-10-10 05:57:18,023] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 51 |
+
[2025-10-10 05:57:18,048] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 52 |
+
[2025-10-10 05:57:18,052] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 53 |
+
[2025-10-10 05:57:18,370] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 54 |
+
[2025-10-10 05:57:18,406] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 55 |
+
[2025-10-10 05:57:18,429] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 56 |
+
[2025-10-10 05:57:18,429] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 57 |
+
[2025-10-10 05:57:18,429] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
|
| 58 |
+
[2025-10-10 05:57:18,430] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 59 |
+
[2025-10-10 05:57:18,431] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 60 |
+
[2025-10-10 05:57:18,453] [INFO] [comm.py:637:init_distributed] cdb=None
|
| 61 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 62 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 63 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 64 |
+
warnings.warn(
|
| 65 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 66 |
+
warnings.warn(
|
| 67 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 68 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 69 |
+
warnings.warn(
|
| 70 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 71 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 72 |
+
warnings.warn(
|
| 73 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 74 |
+
{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
|
| 75 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 76 |
+
warnings.warn(
|
| 77 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 78 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 79 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 80 |
+
warnings.warn(
|
| 81 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 82 |
+
warnings.warn(
|
| 83 |
+
Apply masks for the following modules: ['llm', 'connector']
|
| 84 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 85 |
+
warnings.warn(
|
| 86 |
+
TinyLlavaConfig {
|
| 87 |
+
"backward_type_connector": "normal",
|
| 88 |
+
"cache_dir": null,
|
| 89 |
+
"connector_type": "mlp2x_gelu",
|
| 90 |
+
"hidden_size": 896,
|
| 91 |
+
"ignore_index": -100,
|
| 92 |
+
"image_aspect_ratio": "square",
|
| 93 |
+
"image_token_index": -200,
|
| 94 |
+
"llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 95 |
+
"mask_model": [
|
| 96 |
+
"llm",
|
| 97 |
+
"connector"
|
| 98 |
+
],
|
| 99 |
+
"mask_type_connector": "soft",
|
| 100 |
+
"model_type": "tinyllava",
|
| 101 |
+
"num_queries": 128,
|
| 102 |
+
"num_resampler_layers": 3,
|
| 103 |
+
"pad_token": null,
|
| 104 |
+
"resampler_hidden_size": 768,
|
| 105 |
+
"sparsity_connector": null,
|
| 106 |
+
"subnet_type_connector": "global",
|
| 107 |
+
"temperature_connector": 0.5,
|
| 108 |
+
"text_config": {
|
| 109 |
+
"_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 110 |
+
"architectures": [
|
| 111 |
+
"Qwen2ForCausalLM"
|
| 112 |
+
],
|
| 113 |
+
"backward_type": "normal",
|
| 114 |
+
"bos_token_id": 151643,
|
| 115 |
+
"eos_token_id": 151643,
|
| 116 |
+
"hidden_size": 896,
|
| 117 |
+
"intermediate_size": 4864,
|
| 118 |
+
"mask_type": "soft",
|
| 119 |
+
"masked_layers": "all",
|
| 120 |
+
"max_position_embeddings": 32768,
|
| 121 |
+
"max_window_layers": 24,
|
| 122 |
+
"model_type": "qwen2",
|
| 123 |
+
"num_attention_heads": 14,
|
| 124 |
+
"num_hidden_layers": 24,
|
| 125 |
+
"num_key_value_heads": 2,
|
| 126 |
+
"rope_theta": 1000000.0,
|
| 127 |
+
"sliding_window": 32768,
|
| 128 |
+
"subnet_mode": "both",
|
| 129 |
+
"subnet_type": "None",
|
| 130 |
+
"temperature_attn": 0.5,
|
| 131 |
+
"temperature_mlp": 0.5,
|
| 132 |
+
"tie_word_embeddings": true,
|
| 133 |
+
"torch_dtype": "bfloat16",
|
| 134 |
+
"use_mrope": false,
|
| 135 |
+
"use_sliding_window": false,
|
| 136 |
+
"vocab_size": 151936
|
| 137 |
+
},
|
| 138 |
+
"threshold_connector": null,
|
| 139 |
+
"tokenizer_model_max_length": 2048,
|
| 140 |
+
"tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
|
| 141 |
+
"tokenizer_padding_side": "right",
|
| 142 |
+
"tokenizer_use_fast": false,
|
| 143 |
+
"transformers_version": "4.40.1",
|
| 144 |
+
"tune_type_connector": "frozen",
|
| 145 |
+
"tune_type_llm": "frozen",
|
| 146 |
+
"tune_type_vision_tower": "frozen",
|
| 147 |
+
"tune_vision_tower_from_layer": -1,
|
| 148 |
+
"use_cache": false,
|
| 149 |
+
"vision_config": {
|
| 150 |
+
"hidden_act": "gelu_pytorch_tanh",
|
| 151 |
+
"hidden_size": 1152,
|
| 152 |
+
"image_size": 384,
|
| 153 |
+
"intermediate_size": 4304,
|
| 154 |
+
"layer_norm_eps": 1e-06,
|
| 155 |
+
"model_name_or_path": "google/siglip-so400m-patch14-384",
|
| 156 |
+
"model_name_or_path2": "",
|
| 157 |
+
"model_type": "siglip_vision_model",
|
| 158 |
+
"num_attention_heads": 16,
|
| 159 |
+
"num_hidden_layers": 27,
|
| 160 |
+
"patch_size": 14
|
| 161 |
+
},
|
| 162 |
+
"vision_feature_layer": -2,
|
| 163 |
+
"vision_feature_select_strategy": "patch",
|
| 164 |
+
"vision_hidden_size": 1152,
|
| 165 |
+
"vision_model_name_or_path": "google/siglip-so400m-patch14-384",
|
| 166 |
+
"vision_model_name_or_path2": "",
|
| 167 |
+
"vocab_size": 151936
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 171 |
+
Traceback (most recent call last):
|
| 172 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 173 |
+
resolved_file = hf_hub_download(
|
| 174 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 175 |
+
validate_repo_id(arg_value)
|
| 176 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 177 |
+
raise HFValidationError(
|
| 178 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 179 |
+
|
| 180 |
+
The above exception was the direct cause of the following exception:
|
| 181 |
+
|
| 182 |
+
Traceback (most recent call last):
|
| 183 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 184 |
+
train()
|
| 185 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 186 |
+
model = training_recipe.load(model, model_args)
|
| 187 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 188 |
+
model.load_llm(**model_args['llm'])
|
| 189 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 190 |
+
self.language_model = self.language_model.from_pretrained(
|
| 191 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 192 |
+
resolved_config_file = cached_file(
|
| 193 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 194 |
+
raise EnvironmentError(
|
| 195 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 196 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 197 |
+
Traceback (most recent call last):
|
| 198 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 199 |
+
resolved_file = hf_hub_download(
|
| 200 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 201 |
+
validate_repo_id(arg_value)
|
| 202 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 203 |
+
raise HFValidationError(
|
| 204 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 205 |
+
|
| 206 |
+
The above exception was the direct cause of the following exception:
|
| 207 |
+
|
| 208 |
+
Traceback (most recent call last):
|
| 209 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 210 |
+
train()
|
| 211 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 212 |
+
model = training_recipe.load(model, model_args)
|
| 213 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 214 |
+
model.load_llm(**model_args['llm'])
|
| 215 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 216 |
+
self.language_model = self.language_model.from_pretrained(
|
| 217 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 218 |
+
resolved_config_file = cached_file(
|
| 219 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 220 |
+
raise EnvironmentError(
|
| 221 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 222 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 223 |
+
Traceback (most recent call last):
|
| 224 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 225 |
+
resolved_file = hf_hub_download(
|
| 226 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 227 |
+
validate_repo_id(arg_value)
|
| 228 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 229 |
+
raise HFValidationError(
|
| 230 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 231 |
+
|
| 232 |
+
The above exception was the direct cause of the following exception:
|
| 233 |
+
|
| 234 |
+
Traceback (most recent call last):
|
| 235 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 236 |
+
train()
|
| 237 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 238 |
+
model = training_recipe.load(model, model_args)
|
| 239 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 240 |
+
model.load_llm(**model_args['llm'])
|
| 241 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 242 |
+
self.language_model = self.language_model.from_pretrained(
|
| 243 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 244 |
+
resolved_config_file = cached_file(
|
| 245 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 246 |
+
raise EnvironmentError(
|
| 247 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 248 |
+
[2025-10-10 05:57:56,238] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775550
|
| 249 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 250 |
+
Traceback (most recent call last):
|
| 251 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 252 |
+
resolved_file = hf_hub_download(
|
| 253 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 254 |
+
validate_repo_id(arg_value)
|
| 255 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 256 |
+
raise HFValidationError(
|
| 257 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 258 |
+
|
| 259 |
+
The above exception was the direct cause of the following exception:
|
| 260 |
+
|
| 261 |
+
Traceback (most recent call last):
|
| 262 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 263 |
+
train()
|
| 264 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 265 |
+
model = training_recipe.load(model, model_args)
|
| 266 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 267 |
+
model.load_llm(**model_args['llm'])
|
| 268 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 269 |
+
self.language_model = self.language_model.from_pretrained(
|
| 270 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 271 |
+
resolved_config_file = cached_file(
|
| 272 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 273 |
+
raise EnvironmentError(
|
| 274 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 275 |
+
[2025-10-10 05:57:56,576] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775551
|
| 276 |
+
[2025-10-10 05:57:56,994] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775552
|
| 277 |
+
[2025-10-10 05:57:56,996] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775553
|
| 278 |
+
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
| 279 |
+
Traceback (most recent call last):
|
| 280 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
|
| 281 |
+
resolved_file = hf_hub_download(
|
| 282 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
|
| 283 |
+
validate_repo_id(arg_value)
|
| 284 |
+
File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
|
| 285 |
+
raise HFValidationError(
|
| 286 |
+
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
|
| 287 |
+
|
| 288 |
+
The above exception was the direct cause of the following exception:
|
| 289 |
+
|
| 290 |
+
Traceback (most recent call last):
|
| 291 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
|
| 292 |
+
train()
|
| 293 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
|
| 294 |
+
model = training_recipe.load(model, model_args)
|
| 295 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
|
| 296 |
+
model.load_llm(**model_args['llm'])
|
| 297 |
+
File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
|
| 298 |
+
self.language_model = self.language_model.from_pretrained(
|
| 299 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
|
| 300 |
+
resolved_config_file = cached_file(
|
| 301 |
+
File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
|
| 302 |
+
raise EnvironmentError(
|
| 303 |
+
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
|
| 304 |
+
[2025-10-10 05:57:57,453] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775554
|
| 305 |
+
[2025-10-10 05:57:57,455] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775555
|
| 306 |
+
[2025-10-10 05:57:57,455] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775556
|
| 307 |
+
[2025-10-10 05:57:57,792] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775557
|
| 308 |
+
[2025-10-10 05:57:57,794] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1
|
| 309 |
+
==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ====
|
| 310 |
+
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log
|
| 311 |
+
Timestamp: 2025-10-10 05:57:59
|
| 312 |
+
=====================================
|
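Note on the failure above: every rank dies with the same HFValidationError → OSError chain because load_llm hands <pretrained_model_path>/language_model to from_pretrained, and when that string is not an existing local folder, transformers falls back to resolving it as a Hub repo id, which an absolute filesystem path can never satisfy. A minimal pre-flight check is sketched below; the helper name and error message are illustrative only (not part of TinyLLaVA), and it assumes nothing beyond the standard os and transformers APIs.

import os
from transformers import AutoModelForCausalLM

def load_language_model(pretrained_model_path: str, **kwargs):
    # Hypothetical guard: verify the sub-folder written by the pretrain stage
    # exists locally before from_pretrained falls back to treating the path
    # as a Hub repo id (which triggers the HFValidationError seen above).
    llm_dir = os.path.join(pretrained_model_path, "language_model")
    if not os.path.isdir(llm_dir):
        raise FileNotFoundError(
            f"Expected a local checkpoint folder at {llm_dir}; "
            "check --pretrained_model_path (spelling, case, NFS mount) before launching."
        )
    return AutoModelForCausalLM.from_pretrained(llm_dir, **kwargs)

Run once before launching, a check like this would surface the missing /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model folder before the eight DeepSpeed ranks are spawned and killed.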
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log
ADDED
|
@@ -0,0 +1,225 @@
|
| 1 |
+
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ====
|
| 2 |
+
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log
|
| 3 |
+
Timestamp: 2025-10-10 05:57:59
|
| 4 |
+
=====================================
|
| 5 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 6 |
+
import pynvml # type: ignore[import]
|
| 7 |
+
[2025-10-10 05:58:01,905] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 8 |
+
[2025-10-10 05:58:04,550] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
|
| 9 |
+
[2025-10-10 05:58:04,552] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
|
| 10 |
+
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
|
| 11 |
+
import pynvml # type: ignore[import]
|
| 12 |
+
[2025-10-10 05:58:07,202] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
|
| 13 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
|
| 14 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
|
| 15 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
|
| 16 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
|
| 17 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
|
| 18 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:163:main] dist_world_size=8
|
| 19 |
+
[2025-10-10 05:58:08,240] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
|
| 20 |
+
[2025-10-10 05:58:08,242] [INFO] [launch.py:253:main] process 1777438 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,245] [INFO] [launch.py:253:main] process 1777439 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,247] [INFO] [launch.py:253:main] process 1777440 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,249] [INFO] [launch.py:253:main] process 1777441 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,251] [INFO] [launch.py:253:main] process 1777442 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,253] [INFO] [launch.py:253:main] process 1777443 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,255] [INFO] [launch.py:253:main] process 1777444 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[2025-10-10 05:58:08,257] [INFO] [launch.py:253:main] process 1777445 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
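Editor's note: the eight spawned commands above are identical apart from `--local_rank`. The hyperparameters being ablated here (init_mean 3.0, temperature 0.5, learning rate 3 for both the text masks and the connector mask) also appear to be encoded in the run name `text-3.0_0.5_3_connector-3.0_0.5_3`. A minimal sketch for recovering them from a log file name follows; the naming pattern is an assumption inferred by matching the launcher arguments, not something defined in this repository.

```python
import re

# Hypothetical helper: recover the ablation hyperparameters encoded in a log
# file name. The pattern is an assumption based on matching the launcher
# arguments above (init_mean 3.0, temperature 0.5, learning_rate 3) against
# the run name "text-3.0_0.5_3_connector-3.0_0.5_3".
NAME_RE = re.compile(
    r"text-(?P<init_text>[^_]+)_(?P<temp_text>[^_]+)_(?P<lr_text>[^_]+)"
    r"_connector-(?P<init_conn>[^_]+)_(?P<temp_conn>[^_]+)_(?P<lr_conn>[^_]+)_ablation"
)

def parse_run_name(log_name: str) -> dict:
    """Return the encoded hyperparameters, or an empty dict if the name does not match."""
    m = NAME_RE.search(log_name)
    return m.groupdict() if m else {}

print(parse_run_name(
    "qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log"
))
# -> {'init_text': '3.0', 'temp_text': '0.5', 'lr_text': '3',
#     'init_conn': '3.0', 'temp_conn': '0.5', 'lr_conn': '3'}
```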
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
[2025-10-10 05:58:14,883] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,133] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,196] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,197] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,232] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,247] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,253] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,255] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2025-10-10 05:58:15,294] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,539] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,605] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,607] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,655] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,658] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,659] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
[2025-10-10 05:58:15,659] [INFO] [comm.py:637:init_distributed] cdb=None
[2025-10-10 05:58:15,669] [INFO] [comm.py:637:init_distributed] cdb=None
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Apply masks for the following modules: ['llm', 'connector']
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
TinyLlavaConfig {
  "backward_type_connector": "normal",
  "cache_dir": null,
  "connector_type": "mlp2x_gelu",
  "hidden_size": 896,
  "ignore_index": -100,
  "image_aspect_ratio": "square",
  "image_token_index": -200,
  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
  "mask_model": [
    "llm",
    "connector"
  ],
  "mask_type_connector": "soft",
  "model_type": "tinyllava",
  "num_queries": 128,
  "num_resampler_layers": 3,
  "pad_token": null,
  "resampler_hidden_size": 768,
  "sparsity_connector": null,
  "subnet_type_connector": "global",
  "temperature_connector": 0.5,
  "text_config": {
    "_name_or_path": "Qwen/Qwen2.5-0.5B",
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "backward_type": "normal",
    "bos_token_id": 151643,
    "eos_token_id": 151643,
    "hidden_size": 896,
    "intermediate_size": 4864,
    "mask_type": "soft",
    "masked_layers": "all",
    "max_position_embeddings": 32768,
    "max_window_layers": 24,
    "model_type": "qwen2",
    "num_attention_heads": 14,
    "num_hidden_layers": 24,
    "num_key_value_heads": 2,
    "rope_theta": 1000000.0,
    "sliding_window": 32768,
    "subnet_mode": "both",
    "subnet_type": "None",
    "temperature_attn": 0.5,
    "temperature_mlp": 0.5,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_mrope": false,
    "use_sliding_window": false,
    "vocab_size": 151936
  },
  "threshold_connector": null,
  "tokenizer_model_max_length": 2048,
  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
  "tokenizer_padding_side": "right",
  "tokenizer_use_fast": false,
  "transformers_version": "4.40.1",
  "tune_type_connector": "frozen",
  "tune_type_llm": "frozen",
  "tune_type_vision_tower": "frozen",
  "tune_vision_tower_from_layer": -1,
  "use_cache": false,
  "vision_config": {
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 384,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_name_or_path": "google/siglip-so400m-patch14-384",
    "model_name_or_path2": "",
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 27,
    "patch_size": 14
  },
  "vision_feature_layer": -2,
  "vision_feature_select_strategy": "patch",
  "vision_hidden_size": 1152,
  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
  "vision_model_name_or_path2": "",
  "vocab_size": 151936
}
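Editor's note: the config dump above is what actually differentiates the ablation runs in this folder (which modules are masked, mask types, temperatures, subnet settings). Below is a small sketch for pulling those fields out of a saved config when comparing runs; it assumes the same JSON is also written as `config.json` in the run's output directory, which this log does not confirm.

```python
import json
from pathlib import Path

def mask_settings(config_path: str) -> dict:
    """Extract the masking-related fields from a dumped TinyLlavaConfig JSON."""
    cfg = json.loads(Path(config_path).read_text())
    text = cfg.get("text_config", {})
    return {
        "mask_model": cfg.get("mask_model"),
        "connector": {
            "subnet_type": cfg.get("subnet_type_connector"),
            "mask_type": cfg.get("mask_type_connector"),
            "temperature": cfg.get("temperature_connector"),
        },
        "llm": {
            "subnet_mode": text.get("subnet_mode"),
            "mask_type": text.get("mask_type"),
            "temperature_attn": text.get("temperature_attn"),
            "temperature_mlp": text.get("temperature_mlp"),
            "masked_layers": text.get("masked_layers"),
        },
    }

# Example (the path is hypothetical):
# print(mask_settings("checkpoints/qwen2.5-0_5b_base_masktune_42_.../config.json"))
```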
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Traceback (most recent call last):
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
    resolved_file = hf_hub_download(
  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
    validate_repo_id(arg_value)
  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
    raise HFValidationError(
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
    train()
  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
    model = training_recipe.load(model, model_args)
  File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
    model.load_llm(**model_args['llm'])
  File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
    self.language_model = self.language_model.from_pretrained(
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
    resolved_config_file = cached_file(
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
    raise EnvironmentError(
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
Traceback (most recent call last):
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
    resolved_file = hf_hub_download(
  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
    validate_repo_id(arg_value)
  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
    raise HFValidationError(
huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
    train()
  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
    model = training_recipe.load(model, model_args)
  File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
    model.load_llm(**model_args['llm'])
  File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
    self.language_model = self.language_model.from_pretrained(
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
    resolved_config_file = cached_file(
  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
    raise EnvironmentError(
OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
[2025-10-10 05:58:50,304] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777438
[2025-10-10 05:58:50,689] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777439
[2025-10-10 05:58:50,689] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777440
[2025-10-10 05:58:51,067] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777441
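Editor's note: the failed ranks in this log die with the same pair of errors. `from_pretrained` is handed `<pretrained_model_path>/language_model`, that folder is not found as a local directory, so transformers falls back to treating the absolute path as a Hub repo id and `validate_repo_id` rejects it; the launcher then kills the remaining subprocesses. A minimal pre-flight check one could run before invoking `deepspeed` is sketched below; the required subfolder names are an assumption inferred from the traceback, not code from this repository.

```python
import os
import sys

# Hypothetical pre-flight check: verify the pretrain checkpoint has the
# subfolders that the training recipe tries to load from. Only
# "language_model" is confirmed by the traceback; "connector" is assumed.
PRETRAIN = "/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain"
REQUIRED = ["language_model", "connector"]  # assumed checkpoint layout

missing = [d for d in REQUIRED if not os.path.isdir(os.path.join(PRETRAIN, d))]
if missing:
    sys.exit(f"Pretrained checkpoint is missing subfolders {missing}; "
             f"fix --pretrained_model_path before launching deepspeed.")
print("Checkpoint layout looks OK:", PRETRAIN)
```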
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log
ADDED
@@ -0,0 +1,7 @@
==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ====
Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log
Timestamp: 2025-10-10 06:02:21
=====================================
/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
import pynvml # type: ignore[import]
[2025-10-10 06:02:24,057] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log
ADDED
The diff for this file is too large to render. See raw diff.
logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log
ADDED
The diff for this file is too large to render. See raw diff.